/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain:       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16? Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  that is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *              0800    IP
 *              8100    802.1Q VLAN
 *              0001    802.3
 *              0002    AX.25
 *              0004    802.2
 *              8035    RARP
 *              0005    SNAP
 *              0805    X.25
 *              0806    ARP
 *              8137    IPX
 *              0009    Localtalk
 *              86DD    IPv6
 */

#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
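
/*
 * Illustrative sketch (not part of the original source): the bucket index
 * is simply the low nibble of the protocol value, e.g.
 *
 *      0x8035 (RARP) & PTYPE_HASH_MASK == 0x5
 *      0x0005 (SNAP) & PTYPE_HASH_MASK == 0x5
 *      0x0805 (X.25) & PTYPE_HASH_MASK == 0x5
 *
 * which is exactly the RARP/SNAP/X.25 overlap the comment above mentions.
 */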
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;        /* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);
        return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
         ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
         ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
         ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
         "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
         "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
         "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers that mangle input packets
 *      MUST BE last in the hash buckets, and protocol-checking handlers
 *      MUST start from the promiscuous ptype_all chain in net_bh.
 *      This holds today; do not change it.
 *      Explanation: if a handler that mangles packets were first on the
 *      list, it could not tell that the packet is cloned and should be
 *      copied-on-write, so it would modify the packet in place and
 *      subsequent readers would see a corrupted packet.
 *      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return &ptype_all;
        else
                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that CPUs
 *      in the middle of receiving packets will see the new packet type
 *      (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
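
/*
 * Illustrative usage sketch, assuming a hypothetical protocol module
 * (my_pt and my_rcv are made-up names, not part of this file): a handler
 * is registered once at init and removed at exit.  dev_remove_pack() may
 * sleep, so it must be called from process context.
 *
 *      static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *                        struct packet_type *pt, struct net_device *orig_dev)
 *      {
 *              ... inspect or consume skb ...
 *              kfree_skb(skb);
 *              return NET_RX_SUCCESS;
 *      }
 *
 *      static struct packet_type my_pt __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_IP),
 *              .func = my_rcv,
 *      };
 *
 *      dev_add_pack(&my_pt);           // module init
 *      dev_remove_pack(&my_pt);        // module exit
 */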
/*******************************************************************************

                    Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine
 *      for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
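
/*
 * Illustrative sketch (assumed example, matching the parsing above): a
 * kernel command line of
 *
 *      netdev=5,0x300,0,0,eth0
 *
 * records irq=5 and base_addr=0x300 (mem_start/mem_end left 0) under the
 * name "eth0"; a driver later picks these up via netdev_boot_setup_check().
 */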
/*******************************************************************************

                        Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *      dev_get_by_name_rcu - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name.
 *      If the name is found a pointer to the device is returned.
 *      If the name is not found then %NULL is returned.
 *      The reference counters are not incremented so the caller must be
 *      careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
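
/*
 * Illustrative usage sketch (not from this file): dev_get_by_name() takes
 * a reference which the caller must drop with dev_put().
 *
 *      struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *      if (dev) {
 *              ... use dev ...
 *              dev_put(dev);   // release the reference taken above
 *      }
 */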
/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns a pointer to the device,
 *      or %NULL if it is not found. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns a pointer to the device,
 *      or %NULL if it is not found. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns a pointer to the device,
 *      or NULL if it is not found. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *      dev_getbyhwaddr - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns a pointer to the
 *      device, or NULL if it is not found. The caller must hold the
 *      rtnl semaphore. The returned device has not had its ref count
 *      increased and the caller must therefore be careful about locking.
 *
 *      BUGS:
 *      If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for_each_netdev(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);
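
/*
 * Illustrative sketch (assumed example): the lookup must run under the
 * RTNL semaphore, and since no reference is taken, the result is only
 * safe to use while RTNL is held.
 *
 *      unsigned char mac[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *      struct net_device *dev;
 *
 *      rtnl_lock();
 *      dev = dev_getbyhwaddr(&init_net, ARPHRD_ETHER, (char *)mac);
 *      ... use dev, if non-NULL, before unlocking ...
 *      rtnl_unlock();
 */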
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      dev_get_by_flags_rcu - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns a pointer
 *      to the first matching device, or NULL if none is found. Must be
 *      called inside rcu_read_lock(); the result's refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
                                        unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        for_each_netdev_rcu(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
int dev_valid_name(const char *name)
{
        if (*name == '\0')
                return 0;
        if (strlen(name) >= IFNAMSIZ)
                return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return 0;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return 0;
                name++;
        }
        return 1;
}
EXPORT_SYMBOL(dev_valid_name);
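
/*
 * Illustrative examples (assumed, derived from the checks above):
 * "eth0" and "wlan%d" are accepted (return 1), while "", ".", "..",
 * "a/b", "a b", and any name of IFNAMSIZ or more characters are
 * rejected (return 0).
 */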
/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - e.g. "lt%d" - it will try to find a
 *      suitable id. It scans the list of devices to build up a free map,
 *      then chooses the first empty slot. The caller must hold the
 *      dev_base or rtnl lock while allocating the name and adding the
 *      device in order to avoid duplicates.
 *      Limited to bits_per_byte * page size devices (i.e. 32K on most
 *      platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - e.g. "lt%d" - it will try to find a
 *      suitable id. It scans the list of devices to build up a free map,
 *      then chooses the first empty slot. The caller must hold the
 *      dev_base or rtnl lock while allocating the name and adding the
 *      device in order to avoid duplicates.
 *      Limited to bits_per_byte * page size devices (i.e. 32K on most
 *      platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
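
/*
 * Illustrative usage sketch (assumed example): drivers typically pass a
 * format such as "eth%d"; if eth0 and eth1 already exist, the first free
 * unit is chosen and dev->name becomes "eth2".
 *
 *      err = dev_alloc_name(dev, "eth%d");
 *      if (err < 0)
 *              goto out;       // e.g. -EINVAL, -ENOMEM or -ENFILE
 */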
static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
        struct net *net;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (fmt && strchr(name, '%'))
                return dev_alloc_name(dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change name of a device; a format string such as "eth%d" may be
 *      passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
                return 0;

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(dev, newname, 1);
        if (err < 0)
                return err;

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                return ret;
        }

        write_lock_bh(&dev_base_lock);
        hlist_del(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
                        printk(KERN_ERR
                               "%s: name change rollback failed: %d.\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from @alias
 *
 * Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 * dev_load - load a network module
 * @net: the applicable net namespace
 * @name: name of interface
 *
 * If a network interface is not present and the process has suitable
 * privileges this function loads the module. If module loading is not
 * available in this kernel then it becomes a nop.
 */
void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 * Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 * Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 * If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 * Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 * Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 * Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 * Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 * Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 * Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 * ... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
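
/*
 * Illustrative sketch (editorial assumption, not code from this file):
 * in-kernel callers bring an interface up under the RTNL lock, mirroring
 * what "ip link set ... up" triggers from user space.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *	(err is 0 both on success and when the device was already IFF_UP)
 */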

static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 * Tell people we are going down, so that they can
	 * prepare for it while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch the poll list,
	 * it may even be on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 * Call the device specific close. This cannot fail.
	 * It is only called if the device is UP.
	 *
	 * We allow it to be called even after a DETACH hot-plug
	 * event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 * Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 * Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 * Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);

/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);

static int dev_boot_phase = 1;

/*
 * Device change register/unregister. These are not inline or static
 * as we export them to the world.
 */

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow the caller to have a race-free
 * view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
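
/*
 * Illustrative sketch (editorial assumption, not code from this file): a
 * minimal notifier that logs NETDEV_UP events. In this kernel the ptr
 * argument is the struct net_device itself; "foo" names are hypothetical.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "foo: %s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&foo_netdev_nb);
 */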

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!(dev->flags & IFF_UP) ||
		     (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
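
/*
 * Illustrative sketch (editorial assumption, not code from this file): a
 * virtual device pair can hand frames to its peer from ndo_start_xmit(),
 * the way veth-style drivers use this helper. foo_get_peer() is
 * hypothetical.
 *
 *	static netdev_tx_t foo_xmit(struct sk_buff *skb,
 *				    struct net_device *dev)
 *	{
 *		struct net_device *peer = foo_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */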

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp_set(skb);
#else
	net_timestamp_set(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
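
/*
 * Illustrative sketch (editorial assumption, not code from this file): a
 * driver that allocated the device with several queues can shrink the
 * active set once it knows how many hardware rings it actually obtained.
 * struct foo_priv, FOO_MAX_QUEUES and nr_hw_rings are hypothetical.
 *
 *	dev = alloc_etherdev_mq(sizeof(struct foo_priv), FOO_MAX_QUEUES);
 *	...
 *	err = netif_set_real_num_tx_queues(dev, nr_hw_rings);
 *	(nr_hw_rings must lie in [1, dev->num_tx_queues])
 */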

#ifdef CONFIG_RPS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
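
/*
 * Illustrative sketch (editorial assumption, not code from this file):
 * TX-completion paths that may run in hardirq context free skbs with the
 * _irq or _any variants rather than dev_kfree_skb() directly. The foo
 * handler below is hypothetical.
 *
 *	static irqreturn_t foo_tx_irq(int irq, void *data)
 *	{
 *		...
 *		dev_kfree_skb_any(done_skb);
 *		return IRQ_HANDLED;
 *	}
 */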

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_NO_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	int features = dev->features;

	if (vlan_tx_tag_present(skb)) {
		features &= dev->vlan_features;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		features &= dev->vlan_features;
	}

	return can_checksum_protocol(features, protocol);
}

/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 * skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
		       dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan the skb if a tx timestamp is requested or the
 * sk-reference is needed at the driver level for other reasons,
 * e.g. see net/can/raw.c
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_shinfo(skb)->tx_flags) {
		/* skb_tx_hash() won't be able to get the sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}

int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!skb->vlan_tci)
		return dev->features;

	if (protocol != htons(ETH_P_8021Q))
		return dev->features & dev->vlan_features;
	else
		return 0;
}

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of the fragments is in highmem and the device
 *	   does not support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (skb_is_nonlinear(skb)) {
		int features = dev->features;

		if (vlan_tx_tag_present(skb))
			features &= dev->vlan_features;

		return (skb_has_frag_list(skb) &&
			!(features & NETIF_F_FRAGLIST)) ||
			(skb_shinfo(skb)->nr_frags &&
			(!(features & NETIF_F_SG) ||
			illegal_highdma(dev, skb)));
	}

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu's cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		skb_orphan_try(skb);

		if (vlan_tx_tag_present(skb) &&
		    !(dev->features & NETIF_F_HW_VLAN_TX)) {
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, dev) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
				if (!dev_can_checksum(dev, skb) &&
				    skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu's cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}

static u32 hashrnd __read_mostly;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol ^ skb->rxhash;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
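
/*
 * Worked note (editorial assumption): the final line above maps a 32-bit
 * jhash value uniformly onto [0, real_num_tx_queues) without a modulo by
 * multiplying and taking the high word. For example, with hash = 0x80000000
 * (half of the 32-bit range) and 4 queues:
 *
 *	((u64) 0x80000000 * 4) >> 32 == 2
 *
 * i.e. the midpoint of the hash space lands on the middle queue.
 */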

static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warning("%s selects TX queue %d, but "
				   "real number of TX queues is %d\n",
				   dev->name, queue_index, dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	int queue_index;
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);
		if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {

			queue_index = 0;
			if (dev->real_num_tx_queues > 1)
				queue_index = skb_tx_hash(dev, skb);

			if (sk) {
				struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended = qdisc_is_running(q);
	int rc;

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);
		__qdisc_update_bstats(q, skb->len);
		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = qdisc_enqueue_root(skb, q);
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 * dev_queue_xmit - transmit a buffer
 * @skb: buffer to transmit
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------
 * I notice this method can also return errors from the queue disciplines,
 * including NET_XMIT_DROP, which is a positive value. So, errors can also
 * be positive.
 *
 * Regardless of the return value, the skb is consumed, so it is currently
 * difficult to retry a send to this method. (You can bump the ref count
 * before sending to hold a reference for retry if you are careful.)
 *
 * When calling this method, interrupts MUST be enabled. This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 *	--BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here. (e.g. loopback and IP tunnels are clean, ignoring statistics
	 * counters.)
	 * However, it is possible that they rely on the protection
	 * made by us here.
	 *
	 * Check this and take the lock. It is not prone to deadlocks.
	 * Or take the noqueue qdisc path, which is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64; /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Returns a non-zero hash number on success
 * and 0 on failure.
 */
__u32 __skb_get_rxhash(struct sk_buff *skb)
{
	int nhoff, hash = 0, poff;
	struct ipv6hdr *ip6;
	struct iphdr *ip;
	u8 ip_proto;
	u32 addr1, addr2, ihl;
	union {
		u32 v32;
		u16 v16[2];
	} ports;

	nhoff = skb_network_offset(skb);

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
			goto done;

		ip = (struct iphdr *) (skb->data + nhoff);
		if (ip->frag_off & htons(IP_MF | IP_OFFSET))
			ip_proto = 0;
		else
			ip_proto = ip->protocol;
		addr1 = (__force u32) ip->saddr;
		addr2 = (__force u32) ip->daddr;
		ihl = ip->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
			goto done;

		ip6 = (struct ipv6hdr *) (skb->data + nhoff);
		ip_proto = ip6->nexthdr;
		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		goto done;
	}

	ports.v32 = 0;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		nhoff += ihl * 4 + poff;
		if (pskb_may_pull(skb, nhoff + 4)) {
			ports.v32 = * (__force u32 *) (skb->data + nhoff);
			if (ports.v16[1] < ports.v16[0])
				swap(ports.v16[0], ports.v16[1]);
		}
	}

	/* get a consistent hash (same value on both flow directions) */
	if (addr2 < addr1)
		swap(addr1, addr2);

	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
	if (!hash)
		hash = 1;

done:
	return hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
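
/*
 * Worked note (editorial assumption): the two swaps above make the hash
 * symmetric, so both directions of a flow map to the same value. For
 * example, (saddr=10.0.0.1, daddr=10.0.0.2, sport=5000, dport=80) and its
 * reply hash identically, because the address pair and the port pair are
 * each put in canonical (smaller-first) order before jhash_3words().
 */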

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in-order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = rflow->cpu = next_cpu;
			if (tcpu != RPS_NO_CPU)
				rflow->last_qtail = per_cpu(softnet_data,
				    tcpu).input_queue_head;
		}
		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure belongs to another cpu:
 * if yes, queue it to our IPI list and return 1,
 * if no, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use a non-atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 * netif_rx - post buffer to the network code
 * @skb: buffer to post
 *
 * This function receives a packet from a device driver and queues it for
 * the upper (protocol) levels to process. It always succeeds. The buffer
 * may be dropped during processing for congestion control or by the
 * protocol layers.
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (netdev_tstamp_prequeue)
		net_timestamp_check(skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	{
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	}
#else
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
#endif
	return ret;
}
EXPORT_SYMBOL(netif_rx);
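
/*
 * Illustrative sketch (editorial assumption, not code from this file): a
 * non-NAPI driver's receive interrupt builds an skb and hands it up via
 * netif_rx(); foo_rx_buf and pkt_len are hypothetical.
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return;		(drop, out of memory)
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), foo_rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */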

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless extra
 * instructions (a compare and 2 stores) when we don't have it on
 * but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		if (net_ratelimit())
			pr_warning("Redir loop detected Dropping packet (%d->%d)\n",
				   skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
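
/*
 * Illustrative sketch (editorial assumption, not code from this file):
 * bridge/macvlan-style users attach a handler under RTNL. A handler may
 * consume the skb and return NULL, or return it to continue normal
 * delivery. The "foo" names are hypothetical.
 *
 *	static struct sk_buff *foo_handle_frame(struct sk_buff *skb)
 *	{
 *		struct foo_port *port;
 *
 *		port = rcu_dereference(skb->dev->rx_handler_data);
 *		...either consume skb and return NULL,
 *		   or return skb for normal processing...
 *		return skb;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, foo_handle_frame, port);
 *	rtnl_unlock();
 */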

/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	rcu_assign_pointer(dev->rx_handler, NULL);
	rcu_assign_pointer(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
					      struct net_device *master)
{
	if (skb->pkt_type == PACKET_HOST) {
		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;

		memcpy(dest, master->dev_addr, ETH_ALEN);
	}
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
{
	struct net_device *dev = skb->dev;

	if (master->priv_flags & IFF_MASTER_ARPMON)
		dev->last_rx = jiffies;

	if ((master->priv_flags & IFF_MASTER_ALB) &&
	    (master->priv_flags & IFF_BRIDGE_PORT)) {
		/* Do address unmangle. The local destination address
		 * will always be the one the master has. Provides the right
		 * functionality in a bridge.
		 */
		skb_bond_set_mac_by_master(skb, master);
	}

	if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __cpu_to_be16(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__skb_bond_should_drop);

static int __netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *master;
	struct net_device *null_or_orig;
	struct net_device *orig_or_bond;
	int ret = NET_RX_DROP;
	__be16 type;

	if (!netdev_tstamp_prequeue)
		net_timestamp_check(skb);

	trace_netif_receive_skb(skb);

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->skb_iif)
		skb->skb_iif = skb->dev->ifindex;

	/*
	 * bonding note: skbs received on inactive slaves should only
	 * be delivered to pkt handlers that are exact matches. Also
	 * the deliver_no_wcard flag will be set. If packet handlers
	 * are sensitive to duplicate packets, these skbs will need to
	 * be dropped at the handler.
	 */
	null_or_orig = NULL;
	orig_dev = skb->dev;
	master = ACCESS_ONCE(orig_dev->master);
	if (skb->deliver_no_wcard)
		null_or_orig = orig_dev;
	else if (master) {
		if (skb_bond_should_drop(skb, master)) {
			skb->deliver_no_wcard = 1;
			null_or_orig = orig_dev; /* deliver only exact match */
		} else
			skb->dev = master;
	}

	__this_cpu_inc(softnet_data.processed);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif
  2561. /* Handle special case of bridge or macvlan */
  2562. rx_handler = rcu_dereference(skb->dev->rx_handler);
  2563. if (rx_handler) {
  2564. if (pt_prev) {
  2565. ret = deliver_skb(skb, pt_prev, orig_dev);
  2566. pt_prev = NULL;
  2567. }
  2568. skb = rx_handler(skb);
  2569. if (!skb)
  2570. goto out;
  2571. }
  2572. if (vlan_tx_tag_present(skb)) {
  2573. if (pt_prev) {
  2574. ret = deliver_skb(skb, pt_prev, orig_dev);
  2575. pt_prev = NULL;
  2576. }
  2577. if (vlan_hwaccel_do_receive(&skb)) {
  2578. ret = __netif_receive_skb(skb);
  2579. goto out;
  2580. } else if (unlikely(!skb))
  2581. goto out;
  2582. }
  2583. /*
  2584. * Make sure frames received on VLAN interfaces stacked on
  2585. * bonding interfaces still make their way to any base bonding
  2586. * device that may have registered for a specific ptype. The
  2587. * handler may have to adjust skb->dev and orig_dev.
  2588. */
  2589. orig_or_bond = orig_dev;
  2590. if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
  2591. (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
  2592. orig_or_bond = vlan_dev_real_dev(skb->dev);
  2593. }
  2594. type = skb->protocol;
  2595. list_for_each_entry_rcu(ptype,
  2596. &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
  2597. if (ptype->type == type && (ptype->dev == null_or_orig ||
  2598. ptype->dev == skb->dev || ptype->dev == orig_dev ||
  2599. ptype->dev == orig_or_bond)) {
  2600. if (pt_prev)
  2601. ret = deliver_skb(skb, pt_prev, orig_dev);
  2602. pt_prev = ptype;
  2603. }
  2604. }
  2605. if (pt_prev) {
  2606. ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  2607. } else {
  2608. atomic_long_inc(&skb->dev->rx_dropped);
  2609. kfree_skb(skb);
  2610. /* Jamal, now you will not able to escape explaining
  2611. * me how you were going to use this. :-)
  2612. */
  2613. ret = NET_RX_DROP;
  2614. }
  2615. out:
  2616. rcu_read_unlock();
  2617. return ret;
  2618. }

/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	if (netdev_tstamp_prequeue)
		net_timestamp_check(skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	{
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
		} else {
			rcu_read_unlock();
			ret = __netif_receive_skb(skb);
		}

		return ret;
	}
#else
	return __netif_receive_skb(skb);
#endif
}
EXPORT_SYMBOL(netif_receive_skb);
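
/*
 * Usage sketch (hypothetical driver): a non-NAPI or non-GRO receive
 * path typically sets the protocol and hands the buffer straight to
 * the stack, ignoring the return value as noted above:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 */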

/* Network device is going away; flush any packets still pending.
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

inline void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_count = 0;
	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int mac_len;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_shinfo(skb)->frags[0].size -= grow;

		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
			put_page(skb_shinfo(skb)->frags[0].page);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);

static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= compare_ether_header(skb_mac_header(p),
					      skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_skb_finish);

void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
		NAPI_GRO_CB(skb)->frag0 =
			page_address(skb_shinfo(skb)->frags[0].page) +
			skb_shinfo(skb)->frags[0].page_offset;
		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
	}
}
EXPORT_SYMBOL(skb_gro_reset_offset);

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
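
/*
 * Usage sketch (hypothetical driver): a NAPI poll loop normally prefers
 * napi_gro_receive() over netif_receive_skb() so that segments of the
 * same flow can be merged before protocol processing; "priv" is a
 * made-up driver structure embedding the napi_struct:
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&priv->napi, skb);
 */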

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_frags_finish);

struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}
EXPORT_SYMBOL(napi_frags_skb);

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);

/*
 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending IPIs; it is better to send them now
	 * than to wait for net_rx_action() to end.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this
			 * napi, and NAPI_STATE_SCHED is the only possible
			 * flag set on the backlog, so we can use a plain
			 * write instead of clear_bit(), and we don't need
			 * an smp_mb() memory barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
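
/*
 * Usage sketch (hypothetical ISR): drivers usually reach this through
 * napi_schedule(), which pairs napi_schedule_prep() with
 * __napi_schedule() so the NAPI instance is queued at most once:
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		mydrv_disable_rx_irq(priv);	// hypothetical helper
 *		__napi_schedule(&priv->napi);
 *	}
 */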

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * Don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu.
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);
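
/*
 * Usage sketch: the canonical poll routine calls napi_complete() only
 * when it used less than its budget, then re-enables the device
 * interrupt; mydrv_clean_rx() and mydrv_enable_rx_irq() are made up:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx(napi, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			mydrv_enable_rx_irq(napi->dev);
 *		}
 *		return work;
 *	}
 */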

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
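
/*
 * Usage sketch: called once at probe time, before register_netdev();
 * 64 is the weight most Ethernet drivers pick, and mydrv_poll is the
 * hypothetical poll routine sketched above:
 *
 *	netif_napi_add(netdev, &priv->napi, mydrv_poll, 64);
 */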

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, &sd->poll_list);
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t *gifconf_list[NPROTO];

/**
 * register_gifconf - register a SIOCGIF handler
 * @family: Address family
 * @gifconf: Function handler
 *
 * Register protocol dependent address dumping routines. The handler
 * that is passed must not be freed or reused until it has been replaced
 * by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);

/*
 * Map an interface index to its name (SIOCGIFNAME)
 */

/*
 * We need this ioctl for efficient implementation of the
 * if_indextoname() function required by the IPv6 API.  Without
 * it, we would have to search all the interfaces to find a
 * match.  --pb
 */
static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 * Fetch the caller's info block.
	 */
	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	rcu_read_unlock();

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 * Perform a SIOCGIFCONF call. This structure will change
 * size eventually, and there is nothing I can do about it.
 * Thus we will need a 'compatibility mode'.
 */
static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 * Fetch the caller's info block.
	 */
	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 * Loop over the interfaces, and write an info block for each.
	 */
	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 * All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS
/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);
	loff_t off;
	struct net_device *dev;

	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	off = 1;
	for_each_netdev_rcu(net, dev)
		if (off++ == *pos)
			return dev;

	return NULL;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev = (v == SEQ_START_TOKEN) ?
				  first_net_device(seq_file_net(seq)) :
				  next_net_device((struct net_device *)v);

	++*pos;
	return rcu_dereference(dev);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 * Called from the PROCfs module. This now uses the new arbitrary sized
 * /proc/net interface to create /proc/net/dev.
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   sd->cpu_collision, sd->received_rps);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = softnet_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ptype_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO,
				  &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */

/**
 * netdev_set_master - set up master/slave pair
 * @slave: slave device
 * @master: new master device
 *
 * Changes the master device of the slave. Pass %NULL to break the
 * bonding. The caller must hold the RTNL semaphore. On a failure
 * a negative errno code is returned. On success the reference counts
 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 * function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	if (old) {
		synchronize_net();
		dev_put(old);
	}
	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
EXPORT_SYMBOL(netdev_set_master);
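
/*
 * Usage sketch: bonding-style enslavement and release, both under
 * rtnl_lock(); "bond_dev" and "slave_dev" are placeholders:
 *
 *	err = netdev_set_master(slave_dev, bond_dev);	// enslave
 *	...
 *	netdev_set_master(slave_dev, NULL);		// break the pair
 */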

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	uid_t uid;
	gid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes an overflow, leave promiscuity untouched
		 * and return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			printk(KERN_WARNING "%s: promiscuity counter would "
			       "overflow; set promiscuity failed. The "
			       "promiscuity feature of the device might "
			       "be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
								"left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
				uid, gid,
				audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
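
/*
 * Usage sketch: packet-capture style callers bracket their lifetime
 * with matched +1/-1 increments so the per-device count stays
 * balanced (calls are made under rtnl_lock()):
 *
 *	dev_set_promiscuity(dev, 1);	// start capturing
 *	...
 *	dev_set_promiscuity(dev, -1);	// done capturing
 */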

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface keeps listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes an overflow, leave allmulti untouched
		 * and return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			printk(KERN_WARNING "%s: allmulti counter would "
			       "overflow; set allmulti failed. The allmulti "
			       "feature of the device might be broken.\n",
			       dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
	else {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}

		if (ops->ndo_set_multicast_list)
			ops->ndo_set_multicast_list(dev);
	}
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */
	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now that the flags have changed.
	 */
	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface?  We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */
	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different? */
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;

		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important.  Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without
	 * reporting it.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret, changes;
	int old_flags = dev->flags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = old_flags ^ dev->flags;
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	__dev_notify_flags(dev, old_flags);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;

	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
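
/*
 * Usage sketch: a tunnel-style caller might shrink its MTU to leave
 * room for encapsulation overhead; "lower_dev" and "hdr_len" are
 * hypothetical:
 *
 *	err = dev_set_mtu(dev, lower_dev->mtu - hdr_len);
 *	if (err)
 *		...	// the driver's ndo_change_mtu rejected the value
 */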

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device.
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
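
/*
 * Usage sketch: a kernel caller builds a sockaddr whose family matches
 * dev->type (ARPHRD_ETHER for Ethernet); "new_addr" is hypothetical:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */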

/*
 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;

	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;

	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0,
			       sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data,
				   (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
		err = -EINVAL;
		break;
	}
	return err;
}

/*
 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data,
			   (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCDELMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	/*
	 * Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;
	}
	return err;
}

/*
 * This function handles all "interface"-type I/O control requests. The actual
 * 'doing' part of this is dev_ifsioc above.
 */

/**
 * dev_ioctl - network device ioctl
 * @net: the applicable net namespace
 * @cmd: command to issue
 * @arg: pointer to a struct ifreq in user space
 *
 * Issue ioctl functions to devices. This is normally called by the
 * user space syscall interfaces but can sometimes be useful for
 * other purposes. The return value is the return from the syscall if
 * positive or a negative errno code on error.
 */
int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes an ifconf argument
	 * and requires a shared lock, because it sleeps while writing
	 * to user space.
	 */
	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 * See which interface the caller is talking about.
	 */
	switch (cmd) {
	/*
	 * These ioctl calls:
	 * - can be done by all.
	 * - are atomic and do not require locking.
	 * - return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		rcu_read_lock();
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		rcu_read_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 * These ioctl calls:
	 * - require superuser power.
	 * - require strict serialization.
	 * - return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 * These ioctl calls:
	 * - require superuser power.
	 * - require strict serialization.
	 * - do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -EINVAL;

	/*
	 * Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -EINVAL;
	}
}
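
/*
 * Usage sketch (userspace, for contrast): the SIOCGIFMTU branch above
 * is what serves a plain ioctl() caller such as:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 */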

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	static int ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}
  4174. static void rollback_registered_many(struct list_head *head)
  4175. {
  4176. struct net_device *dev, *tmp;
  4177. BUG_ON(dev_boot_phase);
  4178. ASSERT_RTNL();
  4179. list_for_each_entry_safe(dev, tmp, head, unreg_list) {
  4180. /* Some devices call without registering
  4181. * for initialization unwind. Remove those
  4182. * devices and proceed with the remaining.
  4183. */
  4184. if (dev->reg_state == NETREG_UNINITIALIZED) {
  4185. pr_debug("unregister_netdevice: device %s/%p never "
  4186. "was registered\n", dev->name, dev);
  4187. WARN_ON(1);
  4188. list_del(&dev->unreg_list);
  4189. continue;
  4190. }
  4191. BUG_ON(dev->reg_state != NETREG_REGISTERED);
  4192. /* If device is running, close it first. */
  4193. dev_close(dev);
  4194. /* And unlink it from device chain. */
  4195. unlist_netdevice(dev);
  4196. dev->reg_state = NETREG_UNREGISTERING;
  4197. }
  4198. synchronize_net();
  4199. list_for_each_entry(dev, head, unreg_list) {
  4200. /* Shutdown queueing discipline. */
  4201. dev_shutdown(dev);
  4202. /* Notify protocols, that we are about to destroy
  4203. this device. They should clean all the things.
  4204. */
  4205. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  4206. if (!dev->rtnl_link_ops ||
  4207. dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
  4208. rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
  4209. /*
  4210. * Flush the unicast and multicast chains
  4211. */
  4212. dev_uc_flush(dev);
  4213. dev_mc_flush(dev);
  4214. if (dev->netdev_ops->ndo_uninit)
  4215. dev->netdev_ops->ndo_uninit(dev);
  4216. /* Notifier chain MUST detach us from master device. */
  4217. WARN_ON(dev->master);
  4218. /* Remove entries from kobject tree */
  4219. netdev_unregister_kobject(dev);
  4220. }
  4221. /* Process any work delayed until the end of the batch */
  4222. dev = list_first_entry(head, struct net_device, unreg_list);
  4223. call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
  4224. rcu_barrier();
  4225. list_for_each_entry(dev, head, unreg_list)
  4226. dev_put(dev);
  4227. }
static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
}
unsigned long netdev_fix_features(unsigned long features, const char *name)
{
	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
			       "checksum feature.\n", name);
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
			       "SG feature.\n", name);
		features &= ~NETIF_F_TSO;
	}

	/* UFO requires checksum offload (NETIF_F_GEN_CSUM) and SG. */
	if (features & NETIF_F_UFO) {
		if (!(features & NETIF_F_GEN_CSUM)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no checksum offload feature.\n",
				       name);
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_SG feature.\n", name);
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}
EXPORT_SYMBOL(netdev_fix_features);
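
/* Illustrative only (not part of the original file): a driver would
 * typically sanitize its advertised feature set with this helper before
 * registration, roughly like the sketch below; my_dev is hypothetical.
 *
 *	my_dev->features |= NETIF_F_SG | NETIF_F_TSO;
 *	my_dev->features = netdev_fix_features(my_dev->features,
 *					       my_dev->name);
 */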
/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
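
/* Illustrative only: a stacked driver (a VLAN-style virtual device, for
 * example) would mirror the lower device's state roughly as
 *
 *	netif_stacked_transfer_operstate(lower_dev, stacked_dev);
 *
 * where lower_dev and stacked_dev are hypothetical names for the real
 * device and the virtual device on top of it.
 */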
static int netif_alloc_rx_queues(struct net_device *dev)
{
#ifdef CONFIG_RPS
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		pr_err("netdev: Unable to allocate %u rx queues.\n", count);
		return -ENOMEM;
	}
	dev->_rx = rx;

	/*
	 * Set a pointer to first element in the array which holds the
	 * reference count.
	 */
	for (i = 0; i < count; i++)
		rx[i].first = rx;
#endif
	return 0;
}
static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;

	BUG_ON(count < 1);

	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		pr_err("netdev: Unable to allocate %u tx queues.\n",
		       count);
		return -ENOMEM;
	}
	dev->_tx = tx;
	return 0;
}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;

	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
}
/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = netif_alloc_rx_queues(dev);
	if (ret)
		goto out;

	netdev_init_queues(dev);

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	ret = dev_get_valid_name(dev, dev->name, 0);
	if (ret)
		goto err_uninit;

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Fix illegal checksum combinations */
	if ((dev->features & NETIF_F_HW_CSUM) &&
	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	if ((dev->features & NETIF_F_NO_CSUM) &&
	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
	}

	dev->features = netdev_fix_features(dev->features, dev->name);

	/* Enable software GSO if SG is supported. */
	if (dev->features & NETIF_F_SG)
		dev->features |= NETIF_F_GSO;

	/* Enable GRO and NETIF_F_HIGHDMA for vlans by default.
	 * vlan_dev_init() will do the dev->features check, so these features
	 * are enabled only if supported by the underlying device.
	 */
	dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	/*
	 * Default initial state at registration is that the
	 * device is present.
	 */
	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
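
/* Illustrative only: a driver sharing one NAPI context across several
 * hardware interfaces might back it with a dummy netdev, roughly as
 * below; priv and my_poll are hypothetical driver names.
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &priv->napi, my_poll, 64);
 */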
/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
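
/* Illustrative only: the common driver registration pattern, where
 * struct my_priv and my_setup are hypothetical driver names.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);	(takes rtnl, expands "eth%d")
 *	if (err)
 *		free_netdev(dev);
 */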
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);
/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and clean up and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with two problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_dereference_raw(dev->ip_ptr));
		WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/**
 * dev_txq_stats_fold - fold tx_queues stats
 * @dev: device to get statistics from
 * @stats: struct rtnl_link_stats64 to hold results
 */
void dev_txq_stats_fold(const struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
	unsigned int i;
	struct netdev_queue *txq;

	for (i = 0; i < dev->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		spin_lock_bh(&txq->_xmit_lock);
		tx_bytes += txq->tx_bytes;
		tx_packets += txq->tx_packets;
		tx_dropped += txq->tx_dropped;
		spin_unlock_bh(&txq->_xmit_lock);
	}
	if (tx_bytes || tx_packets || tx_dropped) {
		stats->tx_bytes = tx_bytes;
		stats->tx_packets = tx_packets;
		stats->tx_dropped = tx_dropped;
	}
}
EXPORT_SYMBOL(dev_txq_stats_fold);
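
/* Illustrative only: a driver implementing its own stats callback may
 * reuse this helper to account the per-queue TX counters, roughly:
 *
 *	dev_txq_stats_fold(dev, stats);	(fills stats->tx_* fields)
 *
 * Note the helper only overwrites the tx_* fields when at least one
 * per-queue counter is non-zero.
 */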
/* Convert net_device_stats to rtnl_link_stats64.  They have the same
 * fields in the same order, with only the type differing.
 */
static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
		dev_txq_stats_fold(dev, storage);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
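
/* Illustrative only: callers typically pass an on-stack buffer and use
 * the returned pointer, e.g.
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 */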
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}
/**
 * alloc_netdev_mq - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @setup: callback to initialize device
 * @queue_count: the number of subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (queue_count < 1) {
		pr_err("alloc_netdev: Unable to allocate device "
		       "with zero queues.\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;
	if (netif_alloc_netdev_queues(dev))
		goto free_pcpu;

#ifdef CONFIG_RPS
	dev->num_rx_queues = queue_count;
	dev->real_num_rx_queues = queue_count;
#endif

	dev->gso_max_size = GSO_MAX_SIZE;

	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
	dev->ethtool_ntuple_list.count = 0;
	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);
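
/* Illustrative only: allocating a four-queue device, with struct
 * my_priv and my_setup standing in for a real driver's names.
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "wlan%d",
 *			      my_setup, 4);
 */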
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	kfree(rcu_dereference_raw(dev->ingress_queue));

	/* Flush device addresses */
	dev_addr_flush(dev);

	/* Clear ethtool n-tuple list */
	ethtool_ntuple_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
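
/* Illustrative only: batching several unregistrations under a single
 * RTNL hold amortizes the synchronize_net()/rcu_barrier() cost, e.g.
 * (dev1/dev2 are hypothetical):
 *
 *	LIST_HEAD(kill_list);
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */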
/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore.  In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL, name pattern to try if the current device name
 *	 is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat, 1))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and
	 * unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device. They should clean all of their state.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
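
/* Illustrative only: moving a device into another namespace with a
 * fallback name pattern, under rtnl_lock; newnet is hypothetical.
 *
 *	err = dev_change_net_namespace(dev, newnet, "eth%d");
 */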
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}
/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
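
/* Illustrative only: a master driver (bonding-style) would recompute
 * its combined feature set as each slave joins, roughly:
 *
 *	features = netdev_increment_features(features,
 *					     slave_dev->features, mask);
 *
 * with slave_dev and mask standing in for the driver's own state and
 * feature-mask constant.
 */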
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 * @buffer: buffer for resulting name
 * @len: size of buffer
 *
 * Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;
	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
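
/* Illustrative only: drivers use the generated helpers directly, e.g.
 *
 *	netdev_warn(dev, "ring %u full, stopping queue\n", qid);
 *
 * which prefixes the message with the device (and, when available, its
 * parent device) name; qid is a hypothetical driver variable.
 */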
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device first on the list of network
	 * devices, so that it is the first device that appears and the
	 * last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);