/*
 *	NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline void dev_base_seq_inc(struct net *net)
{
	/* Bump the sequence number, skipping the reserved value 0. */
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/
/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first in the list, it could not tell that the packet is
 *	cloned and must be copied-on-write; it would modify the packet
 *	in place and subsequent readers would see corrupted data.
 *	--ANK (980803)
 */
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep and therefore cannot guarantee that
 * CPUs that are in the middle of receiving packets will see the
 * new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
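
/*
 * Illustrative sketch (not part of the original file): how a protocol
 * module might pair dev_add_pack() with dev_remove_pack(). The handler
 * and module hooks here are hypothetical; a real handler must treat the
 * skb as possibly shared (see the copy-on-write note above) and free
 * whatever it consumes.
 */
#if 0
static int sample_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* Inspect the packet here; it may be a clone, so do not modify it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type sample_pt __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* or htons(ETH_P_ALL) for a tap */
	.func = sample_rcv,
};

static int __init sample_init(void)
{
	dev_add_pack(&sample_pt);
	return 0;
}

static void __exit sample_exit(void)
{
	/* Sleeps until no CPU can still be executing sample_rcv(). */
	dev_remove_pack(&sample_pt);
}
#endif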
/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 * netdev_boot_setup_add - add new setup entry
 * @name: name of the device
 * @map: configured settings for the device
 *
 * Adds new setup entry to the dev_boot_setup list.  The function
 * returns 0 on error and 1 on success.  This is a generic routine
 * for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves the settings configured at boot time for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
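
/*
 * Example (hypothetical values): booting with
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * parses irq=9, base_addr=0x300, mem_start=0xd0000 and mem_end=0xd4000,
 * and stores them under the name "eth0"; a driver later retrieves them
 * through netdev_boot_setup_check() during probing.
 */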
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under RTNL semaphore
 * or @dev_base_lock. If the name is found a pointer to the device
 * is returned. If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 * dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use dev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
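
/*
 * Minimal usage sketch (hypothetical caller): the reference taken by
 * dev_get_by_name() pins the struct net_device itself, not its up/down
 * state, and must be dropped with dev_put().
 */
#if 0
static void sample_name_lookup(void)
{
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* ... use dev; the reference keeps it from being freed ... */
		dev_put(dev);
	}
}
#endif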
/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not had its reference
 * counter increased so the caller must be careful about locking. The
 * caller must hold either the RTNL semaphore or @dev_base_lock.
 */
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 * dev_get_by_index_rcu - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not had its reference
 * counter increased so the caller must be careful about locking. The
 * caller must hold RCU lock.
 */
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
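
/*
 * Sketch of the RCU lookup pattern (hypothetical caller): the returned
 * pointer is only valid inside the read-side critical section unless a
 * reference is taken with dev_hold().
 */
#if 0
static void sample_index_lookup(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		netdev_info(dev, "ifindex %d is %s\n", ifindex, dev->name);
	rcu_read_unlock();	/* dev must not be used past this point */
}
#endif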
/**
 * dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or NULL if it is not found. The device returned has had a reference
 * added and the pointer is safe until the user calls dev_put to
 * indicate they have finished with it.
 */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * dev_getbyhwaddr_rcu - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns a pointer to the
 * device, or NULL if it is not found.
 * The caller must hold RCU or RTNL.
 * The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking.
 */
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 * dev_get_by_flags_rcu - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns a pointer to
 * the first matching device, or NULL if none is found. Must be called
 * inside rcu_read_lock(), and result refcount is unchanged.
 */
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names to
 * allow sysfs to work.  We also disallow any kind of
 * whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
  760. /**
  761. * __dev_alloc_name - allocate a name for a device
  762. * @net: network namespace to allocate the device name in
  763. * @name: name format string
  764. * @buf: scratch buffer and result name string
  765. *
  766. * Passed a format string - eg "lt%d" it will try and find a suitable
  767. * id. It scans list of devices to build up a free map, then chooses
  768. * the first empty slot. The caller must hold the dev_base or rtnl lock
  769. * while allocating the name and adding the device in order to avoid
  770. * duplicates.
  771. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  772. * Returns the number of the unit assigned or a negative errno code.
  773. */
  774. static int __dev_alloc_name(struct net *net, const char *name, char *buf)
  775. {
  776. int i = 0;
  777. const char *p;
  778. const int max_netdevices = 8*PAGE_SIZE;
  779. unsigned long *inuse;
  780. struct net_device *d;
  781. p = strnchr(name, IFNAMSIZ-1, '%');
  782. if (p) {
  783. /*
  784. * Verify the string as this thing may have come from
  785. * the user. There must be either one "%d" and no other "%"
  786. * characters.
  787. */
  788. if (p[1] != 'd' || strchr(p + 2, '%'))
  789. return -EINVAL;
  790. /* Use one page as a bit array of possible slots */
  791. inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
  792. if (!inuse)
  793. return -ENOMEM;
  794. for_each_netdev(net, d) {
  795. if (!sscanf(d->name, name, &i))
  796. continue;
  797. if (i < 0 || i >= max_netdevices)
  798. continue;
  799. /* avoid cases where sscanf is not exact inverse of printf */
  800. snprintf(buf, IFNAMSIZ, name, i);
  801. if (!strncmp(buf, d->name, IFNAMSIZ))
  802. set_bit(i, inuse);
  803. }
  804. i = find_first_zero_bit(inuse, max_netdevices);
  805. free_page((unsigned long) inuse);
  806. }
  807. if (buf != name)
  808. snprintf(buf, IFNAMSIZ, name, i);
  809. if (!__dev_get_by_name(net, buf))
  810. return i;
  811. /* It is possible to run out of possible slots
  812. * when the name is long and there isn't enough space left
  813. * for the digits, or if all bits are used.
  814. */
  815. return -ENFILE;
  816. }
/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - eg "lt%d" - it will try to find a suitable id.
 * It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
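/*
 * Usage sketch (added; not part of dev.c): a driver that wants kernel-chosen
 * numbering passes a "%d" format string. Compiled out with #if 0.
 */
#if 0
static int example_pick_name(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "mydev%d");

	if (unit < 0)
		return unit;		/* -EINVAL, -ENFILE, ... */
	/* dev->name is now e.g. "mydev0"; unit holds the chosen number */
	return 0;
}
#endif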
static int dev_get_valid_name(struct net_device *dev, const char *name)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change name of a device, can pass format strings "eth%d"
 * for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from info
 *
 * Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	/* use a temporary so the old alias is not leaked if krealloc fails */
	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 * dev_load - load a network module
 * @net: the applicable net namespace
 * @name: name of interface
 *
 * If a network interface is not present and the process has suitable
 * privileges this function loads the module. If module loading is not
 * available in this kernel then it becomes a nop.
 */
void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
			       name);
	}
}
EXPORT_SYMBOL(dev_load);
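/*
 * Note (added; not part of dev.c): with the preferred CAP_NET_ADMIN path,
 * the interface name is resolved through a module alias. For instance, a
 * modprobe configuration line such as
 *
 *	alias netdev-ppp0 ppp_generic
 *
 * would let dev_load(net, "ppp0") pull in ppp_generic via the
 * request_module("netdev-%s", ...) call above. The alias shown here is
 * illustrative only.
 */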
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
	}

	return ret;
}
/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
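/*
 * Usage sketch (added; not part of dev.c): dev_open() must run under RTNL,
 * as the ASSERT_RTNL() in __dev_open() enforces. Compiled out with #if 0.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* returns 0 if the device is already up */
	rtnl_unlock();
	return err;
}
#endif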
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can even be on a different CPU. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}
static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable LRO on a vlan device,
	 * use the underlying physical device instead.
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it a race-free
 * view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
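/*
 * Example (added; illustrative sketch, not part of dev.c): a minimal
 * notifier that logs devices coming up and down. In this kernel the
 * notifier's data pointer is the net_device itself. Compiled out with #if 0.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_DOWN:
		pr_info("%s is down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_nb) replays REGISTER/UP events */
#endif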
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)		\
			__net_timestamp(SKB);			\
	}
static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}
static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					pr_crit("protocol %04x is buggy, dev %s\n",
						ntohs(skb2->protocol),
						dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid nothing can be done, so disable priority mappings. It
 * is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
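/*
 * Usage sketch (added; not part of dev.c): a multiqueue driver shrinking its
 * active TX queue set at runtime. The function name is hypothetical.
 */
#if 0
static int example_set_channels(struct net_device *dev, unsigned int count)
{
	int err;

	ASSERT_RTNL();
	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;
	/* skbs mapped to queues >= count have been flushed from the qdisc */
	return 0;
}
#endif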
#ifdef CONFIG_RPS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
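/*
 * Note (added; not part of dev.c): dev_kfree_skb() must not be used in hard
 * IRQ context or with IRQs disabled; dev_kfree_skb_irq() defers the free to
 * the NET_TX softirq via the per-CPU completion queue; dev_kfree_skb_any()
 * picks between them, as the test above shows. A hypothetical TX-completion
 * handler, compiled out with #if 0:
 */
#if 0
static irqreturn_t example_tx_done_irq(int irq, void *data)
{
	struct sk_buff *skb = example_pop_completed(data);	/* hypothetical */

	dev_kfree_skb_any(skb);	/* resolves to dev_kfree_skb_irq() here */
	return IRQ_HANDLED;
}
#endif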
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
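/*
 * Usage sketch (added; not part of dev.c): the detach/attach pair in a
 * driver's power-management hooks. The function names are hypothetical.
 */
#if 0
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... put hardware to sleep ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... wake hardware ... */
	netif_device_attach(dev);	/* wakes queues, restarts watchdog */
	return 0;
}
#endif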
/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 * skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and can map all of the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 * @features: device features as applicable to this skb
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan skb if tx timestamp is requested or the sk-reference
 * is needed on driver level for other reasons, e.g. see net/can/raw.c
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_shinfo(skb)->tx_flags) {
		/* skb_tx_hash() won't be able to get sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}
static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
{
	if (!can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
			    NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      int features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) &&
		 !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags &&
		 !(features & NETIF_F_SG)));
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;
	unsigned int skb_len;

	if (likely(!skb->next)) {
		netdev_features_t features;

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this CPU's cache.
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_orphan_try(skb);

		features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !(features & NETIF_F_HW_VLAN_TX)) {
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				skb_set_transport_header(skb,
					skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				    skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		skb_len = skb->len;
		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc, dev, skb_len);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this CPU's cache.
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		skb_len = nskb->len;
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}
static u32 hashrnd __read_mostly;

/*
 * Returns a Tx hash based on the given packet descriptor and the number of
 * Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol ^ skb->rxhash;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
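/*
 * Note (added for clarity; not part of dev.c): the final line maps a 32-bit
 * hash uniformly onto [qoffset, qoffset + qcount) without a division:
 * ((u64)hash * qcount) >> 32 is the integer part of hash / 2^32 * qcount.
 * E.g. with qcount = 4, hashes below 0x40000000 pick queue 0, the next
 * quarter of the hash space picks queue 1, and so on.
 */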
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
				dev->name, queue_index,
				dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;

				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	int queue_index;
	const struct net_device_ops *ops = dev->netdev_ops;

	if (dev->real_num_tx_queues == 1)
		queue_index = 0;
	else if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;

		queue_index = sk_tx_queue_get(sk);

		if (queue_index < 0 || skb->ooo_okay ||
		    queue_index >= dev->real_num_tx_queues) {
			int old_index = queue_index;

			queue_index = get_xps_queue(dev, skb);
			if (queue_index < 0)
				queue_index = skb_tx_hash(dev, skb);

			if (queue_index != old_index && sk) {
				struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if ((!skb->priority) && (skb->sk) && map)
		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10
/**
 * dev_queue_xmit - transmit a buffer
 * @skb: buffer to transmit
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 * I notice this method can also return errors from the queue disciplines,
 * including NET_XMIT_DROP, which is a positive value. So, errors can also
 * be positive.
 *
 * Regardless of the return value, the skb is consumed, so it is currently
 * difficult to retry a send to this method. (You can bump the ref count
 * before sending to hold a reference for retry if you are careful.)
 *
 * When calling this method, interrupts MUST be enabled. This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 * --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here. (e.g. loopback and IP tunnels are clean, ignoring statistics
	 * counters.)
	 * However, it is possible that they rely on the protection
	 * made by us here.
	 *
	 * Check this and shoot the lock. It is not prone to deadlocks.
	 * Or shoot the noqueue qdisc entirely, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				pr_crit("Virtual device %s asks to queue packet!\n",
					dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			if (net_ratelimit())
				pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
					dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
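/*
 * Usage sketch (added; not part of dev.c): a minimal transmit of a
 * preconstructed frame. Building the skb (headers, priority) is assumed to
 * be done by the caller; compiled out with #if 0.
 */
#if 0
static int example_xmit(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;			/* caller must set the device */
	return dev_queue_xmit(skb);	/* consumes skb even on error */
}
#endif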
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports) {
		if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
			swap(keys.port16[0], keys.port16[1]);
		skb->l4_rxhash = 1;
	}

	/* get a consistent hash (same value on both flow directions) */
	if ((__force u32)keys.dst < (__force u32)keys.src)
		swap(keys.dst, keys.src);

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
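/*
 * Note (added for clarity; not part of dev.c): the two swaps above
 * canonicalize the flow key so both directions of a connection hash
 * identically. E.g. 10.0.0.1:80 -> 10.0.0.2:5000 and 10.0.0.2:5000 ->
 * 10.0.0.1:80 are reordered into the same (addr, addr, ports) triple before
 * jhash_3words(), giving a symmetric rxhash.
 */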
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in-order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0))
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
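
/*
 * A minimal sketch of the periodic scan the kernel-doc above asks RFS
 * drivers for. The filter bookkeeping (n_filters, flow_ids[],
 * filter_ids[], my_remove_hw_filter()) is hypothetical and entirely
 * driver-specific:
 *
 *	for (i = 0; i < n_filters; i++)
 *		if (rps_may_expire_flow(dev, rxq_index,
 *					flow_ids[i], filter_ids[i]))
 *			my_remove_hw_filter(dev, i);
 */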
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another CPU's.
 * If it is, queue it to our IPI list and return 1;
 * if it is our own, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for the backlog device.
		 * We can use a non-atomic operation since we own the
		 * queue lock.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 * netif_rx - post buffer to the network code
 * @skb: buffer to post
 *
 * This function receives a packet from a device driver and queues it for
 * the upper (protocol) levels to process.  It always succeeds.  The buffer
 * may be dropped during processing for congestion control or by the
 * protocol layers.
 *
 * Return values:
 * NET_RX_SUCCESS (no congestion)
 * NET_RX_DROP    (packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
EXPORT_SYMBOL(netif_rx);
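
/*
 * Sketch of the usual netif_rx() call site: a non-NAPI driver's
 * interrupt handler hands a freshly built skb to the stack.
 * my_hw_build_skb() stands in for hypothetical driver receive logic:
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		struct sk_buff *skb = my_hw_build_skb(dev_id);
 *
 *		if (skb) {
 *			skb->protocol = eth_type_trans(skb, skb->dev);
 *			netif_rx(skb);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */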
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
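
/*
 * netif_rx_ni() is the variant for process context: it wraps netif_rx()
 * in preempt_disable()/preempt_enable() and then runs any softirq the
 * call raised, so a caller outside interrupt context (e.g. a kernel
 * thread injecting packets) can simply do:
 *
 *	netif_rx_ni(skb);
 */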
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
	(defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * whenever CONFIG_NET_CLS_ACT is?  Otherwise we currently pay for a
 * compare and two extra stores when the ingress scheduler is not
 * compiled in but CONFIG_NET_CLS_ACT is.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		if (net_ratelimit())
			pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
				skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device.  This handler will then be
 * called from __netif_receive_skb.  A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
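
/*
 * Sketch of a typical registration, in the style of the bridge or
 * macvlan users of this API; my_handle_frame() and my_port are
 * hypothetical:
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		// inspect skb, possibly steal or redirect it here
 *		return RX_HANDLER_PASS;	// let normal delivery continue
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, my_handle_frame, my_port);
 *	rtnl_unlock();
 */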
/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
static int __netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->skb_iif)
		skb->skb_iif = skb->dev->ifindex;
	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb, !rx_handler))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fall through */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds.  The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
/* Network device is going away, flush any packets still pending.
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

inline void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_count = 0;
	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int mac_len;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);
static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_skb_finish);

void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
		NAPI_GRO_CB(skb)->frag0 =
			skb_frag_address(&skb_shinfo(skb)->frags[0]);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
	}
}
EXPORT_SYMBOL(skb_gro_reset_offset);

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
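
/*
 * Sketch of the standard call site: a driver's NAPI poll routine
 * feeding received frames through GRO. my_rx_ring_next() and the ring
 * details are hypothetical placeholders:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = 0;
 *		struct sk_buff *skb;
 *
 *		while (done < budget && (skb = my_rx_ring_next(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete(napi);	// then re-enable device IRQs
 *		return done;
 *	}
 */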
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_frags_finish);

struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}
EXPORT_SYMBOL(napi_frags_skb);

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
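
/*
 * Sketch of the page-based receive path these helpers support: the
 * driver borrows the skb kept in napi->skb, attaches its receive page,
 * and hands it back through napi_gro_frags(). The page/offset/len
 * values are hypothetical driver state, and the truesize accounting
 * is an assumption about a full-page buffer:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;			// allocation failed: drop this frame
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 *	napi_gro_frags(napi);
 */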
/*
 * net_rps_action sends any pending IPIs for RPS.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending IPIs; it is better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this
			 * napi, and NAPI_STATE_SCHED is the only possible
			 * flag set on the backlog, so we can use a plain
			 * write instead of clear_bit(), and we don't need
			 * an smp_mb() memory barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * Don't let napi dequeue from the cpu poll list,
	 * just in case it's running on a different cpu.
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
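
/*
 * Typical setup pairing with a poll routine such as the my_poll()
 * sketched after napi_gro_receive() above; 'priv' is a hypothetical
 * driver structure and 64 the conventional weight:
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, 64);
 *	...
 *	napi_enable(&priv->napi);	// usually from ndo_open
 */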
void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows an average
		 * latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, &sd->poll_list);
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware.
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
static gifconf_func_t *gifconf_list[NPROTO];

/**
 * register_gifconf - register a SIOCGIF handler
 * @family: Address family
 * @gifconf: Function handler
 *
 * Register protocol dependent address dumping routines.  The handler
 * that is passed must not be freed or reused until it has been replaced
 * by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);

/*
 * Map an interface index to its name (SIOCGIFNAME)
 */

/*
 * We need this ioctl for efficient implementation of the
 * if_indextoname() function required by the IPv6 API.  Without
 * it, we would have to search all the interfaces to find a
 * match.  --pb
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 * Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	rcu_read_unlock();

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}
/*
 * Perform a SIOCGIFCONF call.  This structure will change
 * size eventually, and there is nothing I can do about it.
 * Thus we will need a 'compatibility mode'.
 */
static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 * Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 * Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 * All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (32 - NETDEV_HASHBITS)

struct dev_iter_state {
	struct seq_net_private p;
	unsigned int pos; /* bucket << BUCKET_SPACE + offset */
};

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
{
	struct dev_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_node *p;
	struct hlist_head *h;
	unsigned int count, bucket, offset;

	bucket = get_bucket(state->pos);
	offset = get_offset(state->pos);
	h = &net->dev_name_head[bucket];
	count = 0;
	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
		if (count++ == offset) {
			state->pos = set_bucket_offset(bucket, count);
			return dev;
		}
	}

	return NULL;
}

static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
{
	struct dev_iter_state *state = seq->private;
	struct net_device *dev;
	unsigned int bucket;

	bucket = get_bucket(state->pos);
	do {
		dev = dev_from_same_bucket(seq);
		if (dev)
			return dev;

		bucket++;
		state->pos = set_bucket_offset(bucket, 0);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct dev_iter_state *state = seq->private;

	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	/* check for end of the hash */
	if (state->pos == 0 && *pos > 1)
		return NULL;

	return dev_from_new_bucket(seq);
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return dev_from_new_bucket(seq);

	dev = dev_from_same_bucket(seq);
	if (dev)
		return dev;

	return dev_from_new_bucket(seq);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 * Called from the PROCfs module.  This now uses the new arbitrary sized
 * /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   sd->cpu_collision, sd->received_rps);
	return 0;
}
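
/*
 * Each line of /proc/net/softnet_stat is one online CPU, printed as hex
 * words in the order used above: processed, dropped, time_squeeze, five
 * always-zero placeholders (one of which used to be fastroute), then
 * cpu_collision and received_rps.
 */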
static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct dev_iter_state));
}

int dev_seq_open_ops(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = ptype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO,
				  &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */
/**
 * netdev_set_master - set up master pointer
 * @slave: slave device
 * @master: new master device
 *
 * Changes the master device of the slave.  Pass %NULL to break the
 * bonding.  The caller must hold the RTNL semaphore.  On a failure
 * a negative errno code is returned.  On success the reference counts
 * are adjusted and the function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	if (old)
		dev_put(old);
	return 0;
}
EXPORT_SYMBOL(netdev_set_master);

/**
 * netdev_set_bond_master - set up bonding master/slave pair
 * @slave: slave device
 * @master: new master device
 *
 * Changes the master device of the slave.  Pass %NULL to break the
 * bonding.  The caller must hold the RTNL semaphore.  On a failure
 * a negative errno code is returned.  On success %RTM_NEWLINK is sent
 * to the routing socket and the function returns zero.
 */
int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
{
	int err;

	ASSERT_RTNL();

	err = netdev_set_master(slave, master);
	if (err)
		return err;
	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;
	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
EXPORT_SYMBOL(netdev_set_bond_master);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	uid_t uid;
	gid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave promisc untouched and
		 * return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
				uid, gid,
				audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device.  While the count in the device
 * remains above zero the interface remains promiscuous.  Once it hits zero
 * the device reverts back to normal filtering operation.  A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
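
/*
 * The reference-count semantics documented above, as a sketch with two
 * independent users of one device:
 *
 *	dev_set_promiscuity(dev, 1);	// A: count 0 -> 1, enters promisc
 *	dev_set_promiscuity(dev, 1);	// B: count 1 -> 2, no flag change
 *	dev_set_promiscuity(dev, -1);	// A done: count 2 -> 1, still promisc
 *	dev_set_promiscuity(dev, -1);	// B done: count 1 -> 0, leaves promisc
 */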
/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device.  While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames.  Once it hits zero the device reverts back to
 * normal filtering operation.  A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave allmulti untouched and
		 * return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);
/*
 * Upload unicast and multicast address lists to the device and
 * configure RX filtering.  When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface?  We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different? */
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;

		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important.  Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without
	 * reporting it.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on the supplied state flags.  The
 * flags are in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = old_flags ^ dev->flags;
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	__dev_notify_flags(dev, old_flags);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;

	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device.
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mac_address);

/*
 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;

	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;

	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
		err = -ENOTTY;
		break;

	}
	return err;
}

/*
 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if (!ops->ndo_set_rx_mode ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCDELMULTI:
		if (!ops->ndo_set_rx_mode ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	case SIOCSHWTSTAMP:
		err = net_hwtstamp_validate(ifr);
		if (err)
			return err;
		/* fall through */

	/*
	 *	Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;

	}
	return err;
}

/*
 * This function handles all "interface"-type I/O control requests. The actual
 * 'doing' part of this is dev_ifsioc above.
 */

/**
 * dev_ioctl - network device ioctl
 * @net: the applicable net namespace
 * @cmd: command to issue
 * @arg: pointer to a struct ifreq in user space
 *
 * Issue ioctl functions to devices. This is normally called by the
 * user space syscall interfaces but can sometimes be useful for
 * other purposes. The return value is the return from the syscall if
 * positive or a negative errno code on error.
 */
int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		rcu_read_lock();
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		rcu_read_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -ENOTTY;

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -ENOTTY;
	}
}
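
/* Illustrative only: how userspace reaches dev_ioctl() above. Any socket fd
 * serves as the ioctl target; SIOCGIFMTU takes the read-mostly path through
 * dev_ifsioc_locked() under rcu_read_lock(). Userspace sketch (not kernel
 * code), shown here as a comment:
 *
 *	#include <net/if.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "lo", IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *			printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
 *		close(fd);
 *		return 0;
 *	}
 */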

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols, that we are about to destroy
		   this device. They should clean all the things.
		*/
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		/* Notifier chain MUST detach us from master device. */
		WARN_ON(dev->master);

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
	}

	/* Process any work delayed until the end of the batch */
	dev = list_first_entry(head, struct net_device, unreg_list);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		netdev_dbg(dev,
			"Dropping NETIF_F_SG since no checksum feature.\n");
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
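
/* Illustrative only: a sketch of how a driver cooperates with the feature
 * machinery above. The hypothetical hardware cannot segment below a small
 * MTU, so its ndo_fix_features() masks the TSO bits and its MTU handler
 * calls netdev_update_features() to rerun the negotiation. All names and
 * the 576-byte limit are invented for the example.
 */
static netdev_features_t __maybe_unused
example_fix_features(struct net_device *dev, netdev_features_t features)
{
	if (dev->mtu < 576)
		features &= ~NETIF_F_ALL_TSO;	/* assumed hardware limit */
	return features;
}

static int __maybe_unused example_change_mtu(struct net_device *dev,
					     int new_mtu)
{
	dev->mtu = new_mtu;
	netdev_update_features(dev);	/* recheck deps, notify on change */
	return 0;
}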

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

#ifdef CONFIG_RPS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		pr_err("netdev: Unable to allocate %u rx queues\n", count);
		return -ENOMEM;
	}
	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;

	BUG_ON(count < 1);

	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		pr_err("netdev: Unable to allocate %u tx queues\n", count);
		return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = dev_get_valid_name(dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	/* Turn on no cache copy if HW is doing checksum */
	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
		if (dev->features & NETIF_F_ALL_CSUM) {
			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
			dev->features |= NETIF_F_NOCACHE_COPY;
		}
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
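
/* Illustrative only (not part of the original file): the canonical
 * registration pattern for the API above. A driver allocates the device with
 * alloc_netdev_mqs() (defined later in this file) and hands it to
 * register_netdev(), which takes RTNL itself. The name template, the queue
 * counts, and the assumption that ether_setup() from <linux/etherdevice.h>
 * is visible here are all illustrative.
 */
static int __maybe_unused example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev_mqs(0, "example%d", ether_setup, 1, 1);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);	/* resolves "example%d" to a free name */
	if (err)
		free_netdev(dev);	/* free_netdev() copes with failed registration */
	return err;
}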

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before attempting to drain
	 * the device list. This usually avoids a 250ms wait.
	 */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
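
/* Illustrative only: a sketch of consuming dev_get_stats(). The caller
 * supplies the storage, so no locking against other readers is needed;
 * the logged fields are arbitrary choices for the example.
 */
static void __maybe_unused example_log_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 storage;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &storage);

	netdev_info(dev, "rx_packets %llu rx_dropped %llu\n",
		    (unsigned long long)stats->rx_packets,
		    (unsigned long long)stats->rx_dropped);
}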

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		pr_err("alloc_netdev: Unable to allocate device\n");
		return NULL;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
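
/* Illustrative only: teardown matching the registration sketch after
 * register_netdev() above. unregister_netdev() takes RTNL; netdev_run_todo(),
 * invoked from rtnl_unlock(), waits for all references before returning, so
 * free_netdev() is safe immediately afterwards. The function name is invented.
 */
static void __maybe_unused example_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* blocks in netdev_wait_allrefs() if needed */
	free_netdev(dev);		/* reg_state is NETREG_UNREGISTERED here */
}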

/**
 * dev_change_net_namespace - move device to different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL, name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
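
/* Illustrative only: how a bonding/bridge-style master might fold a new
 * slave's features into its own set with netdev_increment_features(). The
 * helper name and the caller-chosen @mask are assumptions for the example.
 */
static netdev_features_t __maybe_unused
example_add_slave_features(netdev_features_t master_features,
			   const struct net_device *slave,
			   netdev_features_t mask)
{
	/* Bits clear in @mask can never be switched on by the slave. */
	return netdev_increment_features(master_features, slave->features, mask);
}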

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

int __netdev_printk(const char *level, const struct net_device *dev,
		    struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}
EXPORT_SYMBOL(__netdev_printk);

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
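
/* Illustrative only: the helpers generated above follow the printk()
 * calling convention but prefix messages with the device name (and, via
 * __netdev_printk(), with the parent device when one is set). A hedged
 * usage sketch with invented arguments:
 *
 *	netdev_warn(dev, "link flapped %d times\n", flap_count);
 *	netdev_info(dev, "MTU changed to %d\n", dev->mtu);
 */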

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);