dev.c
/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow         :   Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox            :   device private ioctl copies fields back.
 *              Alan Cox            :   Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox            :   Fixed double lock.
 *              Alan Cox            :   Fixed promisc NULL pointer trap
 *              ????????            :   Support the full private ioctl range
 *              Alan Cox            :   Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas          :   SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox            :   100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox            :   Rewrote net_bh and list manager.
 *              Alan Cox            :   Fix ETH_P_ALL echoback lengths.
 *              Alan Cox            :   Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox            :   Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox            :   Hashed net_bh()
 *              Richard Kooijman    :   Timestamp fixes.
 *              Alan Cox            :   Wrong field in SIOCGIFDSTADDR
 *              Alan Cox            :   Device lock protection.
 *              Alan Cox            :   Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi      :   Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller         :   32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall        :   Added KERNELD hack.
 *              Alan Cox            :   Cleaned up the backlog initialise.
 *              Craig Metz          :   SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen          :   Fix error reporting for SIOCGIFCONF
 *              Michael Chastain    :   Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin        :   Cleaned for KMOD
 *              Adam Sulmicki       :   Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell  :   SIOCSIFNAME
 *              Pekka Riikonen      :   Netdev boot-time settings code
 *              Andrew Morton       :   Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim        :   - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/net_tstamp.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16. Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *              0800    IP
 *              8100    802.1Q VLAN
 *              0001    802.3
 *              0002    AX.25
 *              0004    802.2
 *              8035    RARP
 *              0005    SNAP
 *              0805    X.25
 *              0806    ARP
 *              8137    IPX
 *              0009    Localtalk
 *              86DD    IPv6
 */

#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;        /* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);

        return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
         ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
         ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
         ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
         "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
         "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
         "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
                Protocol management and registration routines
*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if protocol handler, mangling packet, will
 *      be the first on list, it is not able to sense, that packet
 *      is cloned and should be copied-on-write, so that it will
 *      change it and subsequent readers will get broken packet.
 *                                                      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return &ptype_all;
        else
                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep therefore it can not
 *      guarantee all CPU's that are in middle of receiving packets
 *      will see the new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPU's have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
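/*
 * Illustrative sketch of how a protocol module typically uses the pair
 * dev_add_pack()/dev_remove_pack().  The handler, the protocol choice and
 * the structure name below are hypothetical and not part of dev.c.
 */
#if 0
static int example_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
{
        /* A real handler would parse the packet; here we just consume it. */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pkt_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),  /* ethertype this handler wants */
        .func = example_pkt_rcv,
};

/* module init:  dev_add_pack(&example_pkt_type);    */
/* module exit:  dev_remove_pack(&example_pkt_type); */
#endif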
/******************************************************************************
                      Device Boot-time Settings Routines
*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine to
 *      all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
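/*
 * Boot-time usage parsed by netdev_boot_setup() above (values illustrative):
 *
 *      netdev=<irq>,<base_addr>,<mem_start>,<mem_end>,<name>
 *      e.g.  netdev=5,0x300,0,0,eth0
 *
 * get_options() fills ints[1]..ints[4] with the numeric fields and leaves
 * the trailing device name in str, which netdev_boot_setup_add() stores for
 * later matching by netdev_boot_setup_check() and netdev_boot_base().
 */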
/*******************************************************************************
                            Device Interface Subroutines
*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *      dev_get_by_name_rcu - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name.
 *      If the name is found a pointer to the device is returned.
 *      If the name is not found then %NULL is returned.
 *      The reference counters are not incremented so the caller must be
 *      careful with locks. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
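/*
 * Illustrative sketch of the refcounted lookup pattern expected by
 * dev_get_by_name(); the device name and the caller are hypothetical.
 */
#if 0
static void example_use_by_name(struct net *net)
{
        struct net_device *dev = dev_get_by_name(net, "eth0");

        if (!dev)
                return;
        /* ... use dev; the held reference keeps it alive ... */
        dev_put(dev);           /* release the reference taken above */
}
#endif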
/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
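/*
 * Illustrative sketch of the lock-free variant: the caller supplies the RCU
 * read-side critical section and must not dereference the device outside it.
 * The helper name and use of the MTU are hypothetical.
 */
#if 0
static unsigned int example_mtu_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        unsigned int mtu = 0;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                mtu = dev->mtu; /* no reference held, only valid here */
        rcu_read_unlock();

        return mtu;
}
#endif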
/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *      dev_getbyhwaddr_rcu - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device.
 *      The caller must hold RCU or RTNL.
 *      The returned device has not had its ref count increased
 *      and the caller must therefore be careful about locking
 *
 */
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      dev_get_by_flags_rcu - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. Must be called inside
 *      rcu_read_lock(), and result refcount is unchanged.
 */
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
                                        unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        for_each_netdev_rcu(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
int dev_valid_name(const char *name)
{
        if (*name == '\0')
                return 0;
        if (strlen(name) >= IFNAMSIZ)
                return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return 0;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return 0;
                name++;
        }
        return 1;
}
EXPORT_SYMBOL(dev_valid_name);
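/*
 * Examples (illustrative): "eth0", "bond0.100" and "lan%d" pass the checks
 * above; "", ".", "..", "a/b", names containing whitespace and names of
 * IFNAMSIZ or more characters are rejected.
 */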
  762. /**
  763. * __dev_alloc_name - allocate a name for a device
  764. * @net: network namespace to allocate the device name in
  765. * @name: name format string
  766. * @buf: scratch buffer and result name string
  767. *
  768. * Passed a format string - eg "lt%d" it will try and find a suitable
  769. * id. It scans list of devices to build up a free map, then chooses
  770. * the first empty slot. The caller must hold the dev_base or rtnl lock
  771. * while allocating the name and adding the device in order to avoid
  772. * duplicates.
  773. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  774. * Returns the number of the unit assigned or a negative errno code.
  775. */
  776. static int __dev_alloc_name(struct net *net, const char *name, char *buf)
  777. {
  778. int i = 0;
  779. const char *p;
  780. const int max_netdevices = 8*PAGE_SIZE;
  781. unsigned long *inuse;
  782. struct net_device *d;
  783. p = strnchr(name, IFNAMSIZ-1, '%');
  784. if (p) {
  785. /*
  786. * Verify the string as this thing may have come from
  787. * the user. There must be either one "%d" and no other "%"
  788. * characters.
  789. */
  790. if (p[1] != 'd' || strchr(p + 2, '%'))
  791. return -EINVAL;
  792. /* Use one page as a bit array of possible slots */
  793. inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
  794. if (!inuse)
  795. return -ENOMEM;
  796. for_each_netdev(net, d) {
  797. if (!sscanf(d->name, name, &i))
  798. continue;
  799. if (i < 0 || i >= max_netdevices)
  800. continue;
  801. /* avoid cases where sscanf is not exact inverse of printf */
  802. snprintf(buf, IFNAMSIZ, name, i);
  803. if (!strncmp(buf, d->name, IFNAMSIZ))
  804. set_bit(i, inuse);
  805. }
  806. i = find_first_zero_bit(inuse, max_netdevices);
  807. free_page((unsigned long) inuse);
  808. }
  809. if (buf != name)
  810. snprintf(buf, IFNAMSIZ, name, i);
  811. if (!__dev_get_by_name(net, buf))
  812. return i;
  813. /* It is possible to run out of possible slots
  814. * when the name is long and there isn't enough space left
  815. * for the digits, or if all bits are used.
  816. */
  817. return -ENFILE;
  818. }
  819. /**
  820. * dev_alloc_name - allocate a name for a device
  821. * @dev: device
  822. * @name: name format string
  823. *
824. * Passed a format string - e.g. "lt%d" - it will try to find a suitable
825. * id. It scans the list of devices to build up a free map, then chooses
  826. * the first empty slot. The caller must hold the dev_base or rtnl lock
  827. * while allocating the name and adding the device in order to avoid
  828. * duplicates.
  829. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  830. * Returns the number of the unit assigned or a negative errno code.
  831. */
  832. int dev_alloc_name(struct net_device *dev, const char *name)
  833. {
  834. char buf[IFNAMSIZ];
  835. struct net *net;
  836. int ret;
  837. BUG_ON(!dev_net(dev));
  838. net = dev_net(dev);
  839. ret = __dev_alloc_name(net, name, buf);
  840. if (ret >= 0)
  841. strlcpy(dev->name, buf, IFNAMSIZ);
  842. return ret;
  843. }
  844. EXPORT_SYMBOL(dev_alloc_name);
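/*
 * Editor's illustrative sketch (not part of dev.c): a driver requesting an
 * "eth%d"-style unit number before registration. The function and the
 * "mydev%d" format string are hypothetical; dev_net(dev) must already be set.
 */
#if 0
static int example_name_device(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "mydev%d");

	if (unit < 0)
		return unit;	/* -EINVAL, -ENOMEM or -ENFILE */
	/* dev->name now holds e.g. "mydev0"; unit is the assigned number */
	return 0;
}
#endif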
  845. static int dev_get_valid_name(struct net_device *dev, const char *name)
  846. {
  847. struct net *net;
  848. BUG_ON(!dev_net(dev));
  849. net = dev_net(dev);
  850. if (!dev_valid_name(name))
  851. return -EINVAL;
  852. if (strchr(name, '%'))
  853. return dev_alloc_name(dev, name);
  854. else if (__dev_get_by_name(net, name))
  855. return -EEXIST;
  856. else if (dev->name != name)
  857. strlcpy(dev->name, name, IFNAMSIZ);
  858. return 0;
  859. }
  860. /**
  861. * dev_change_name - change name of a device
  862. * @dev: device
  863. * @newname: name (or format string) must be at least IFNAMSIZ
  864. *
865. * Change the name of a device. A format string such as "eth%d" can be
866. * passed for wildcarding.
  867. */
  868. int dev_change_name(struct net_device *dev, const char *newname)
  869. {
  870. char oldname[IFNAMSIZ];
  871. int err = 0;
  872. int ret;
  873. struct net *net;
  874. ASSERT_RTNL();
  875. BUG_ON(!dev_net(dev));
  876. net = dev_net(dev);
  877. if (dev->flags & IFF_UP)
  878. return -EBUSY;
  879. if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
  880. return 0;
  881. memcpy(oldname, dev->name, IFNAMSIZ);
  882. err = dev_get_valid_name(dev, newname);
  883. if (err < 0)
  884. return err;
  885. rollback:
  886. ret = device_rename(&dev->dev, dev->name);
  887. if (ret) {
  888. memcpy(dev->name, oldname, IFNAMSIZ);
  889. return ret;
  890. }
  891. write_lock_bh(&dev_base_lock);
  892. hlist_del_rcu(&dev->name_hlist);
  893. write_unlock_bh(&dev_base_lock);
  894. synchronize_rcu();
  895. write_lock_bh(&dev_base_lock);
  896. hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
  897. write_unlock_bh(&dev_base_lock);
  898. ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
  899. ret = notifier_to_errno(ret);
  900. if (ret) {
  901. /* err >= 0 after dev_alloc_name() or stores the first errno */
  902. if (err >= 0) {
  903. err = ret;
  904. memcpy(dev->name, oldname, IFNAMSIZ);
  905. goto rollback;
  906. } else {
  907. printk(KERN_ERR
  908. "%s: name change rollback failed: %d.\n",
  909. dev->name, ret);
  910. }
  911. }
  912. return err;
  913. }
  914. /**
  915. * dev_set_alias - change ifalias of a device
  916. * @dev: device
  917. * @alias: name up to IFALIASZ
  918. * @len: limit of bytes to copy from info
  919. *
920. * Set ifalias for a device.
  921. */
  922. int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
  923. {
  924. ASSERT_RTNL();
  925. if (len >= IFALIASZ)
  926. return -EINVAL;
  927. if (!len) {
  928. if (dev->ifalias) {
  929. kfree(dev->ifalias);
  930. dev->ifalias = NULL;
  931. }
  932. return 0;
  933. }
  934. dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
  935. if (!dev->ifalias)
  936. return -ENOMEM;
  937. strlcpy(dev->ifalias, alias, len+1);
  938. return len;
  939. }
  940. /**
  941. * netdev_features_change - device changes features
  942. * @dev: device to cause notification
  943. *
  944. * Called to indicate a device has changed features.
  945. */
  946. void netdev_features_change(struct net_device *dev)
  947. {
  948. call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
  949. }
  950. EXPORT_SYMBOL(netdev_features_change);
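/*
 * Editor's illustrative sketch (not part of dev.c): a driver toggling a
 * feature bit outside the normal ethtool path and announcing the change.
 * The trigger function is hypothetical; the caller must hold RTNL because
 * the notifier chain asserts it.
 */
#if 0
static void example_disable_tso(struct net_device *dev)
{
	if (dev->features & NETIF_F_TSO) {
		dev->features &= ~NETIF_F_TSO;
		netdev_features_change(dev);	/* emits NETDEV_FEAT_CHANGE */
	}
}
#endif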
  951. /**
  952. * netdev_state_change - device changes state
  953. * @dev: device to cause notification
  954. *
  955. * Called to indicate a device has changed state. This function calls
  956. * the notifier chains for netdev_chain and sends a NEWLINK message
  957. * to the routing socket.
  958. */
  959. void netdev_state_change(struct net_device *dev)
  960. {
  961. if (dev->flags & IFF_UP) {
  962. call_netdevice_notifiers(NETDEV_CHANGE, dev);
  963. rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
  964. }
  965. }
  966. EXPORT_SYMBOL(netdev_state_change);
  967. int netdev_bonding_change(struct net_device *dev, unsigned long event)
  968. {
  969. return call_netdevice_notifiers(event, dev);
  970. }
  971. EXPORT_SYMBOL(netdev_bonding_change);
  972. /**
  973. * dev_load - load a network module
  974. * @net: the applicable net namespace
  975. * @name: name of interface
  976. *
  977. * If a network interface is not present and the process has suitable
  978. * privileges this function loads the module. If module loading is not
  979. * available in this kernel then it becomes a nop.
  980. */
  981. void dev_load(struct net *net, const char *name)
  982. {
  983. struct net_device *dev;
  984. int no_module;
  985. rcu_read_lock();
  986. dev = dev_get_by_name_rcu(net, name);
  987. rcu_read_unlock();
  988. no_module = !dev;
  989. if (no_module && capable(CAP_NET_ADMIN))
  990. no_module = request_module("netdev-%s", name);
  991. if (no_module && capable(CAP_SYS_MODULE)) {
  992. if (!request_module("%s", name))
  993. pr_err("Loading kernel module for a network device "
  994. "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
  995. "instead\n", name);
  996. }
  997. }
  998. EXPORT_SYMBOL(dev_load);
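/*
 * Editor's illustrative sketch (not part of dev.c): an ioctl-style lookup
 * that wants "tun", "bond0" and friends autoloaded first, roughly as the
 * SIOCGIFINDEX handlers do. The wrapper function is hypothetical.
 */
#if 0
static struct net_device *example_lookup_autoload(struct net *net,
						  const char *name)
{
	dev_load(net, name);	/* may sleep in request_module() */
	return dev_get_by_name(net, name);	/* caller must dev_put() */
}
#endif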
  999. static int __dev_open(struct net_device *dev)
  1000. {
  1001. const struct net_device_ops *ops = dev->netdev_ops;
  1002. int ret;
  1003. ASSERT_RTNL();
  1004. if (!netif_device_present(dev))
  1005. return -ENODEV;
  1006. ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
  1007. ret = notifier_to_errno(ret);
  1008. if (ret)
  1009. return ret;
  1010. set_bit(__LINK_STATE_START, &dev->state);
  1011. if (ops->ndo_validate_addr)
  1012. ret = ops->ndo_validate_addr(dev);
  1013. if (!ret && ops->ndo_open)
  1014. ret = ops->ndo_open(dev);
  1015. if (ret)
  1016. clear_bit(__LINK_STATE_START, &dev->state);
  1017. else {
  1018. dev->flags |= IFF_UP;
  1019. net_dmaengine_get();
  1020. dev_set_rx_mode(dev);
  1021. dev_activate(dev);
  1022. }
  1023. return ret;
  1024. }
  1025. /**
  1026. * dev_open - prepare an interface for use.
  1027. * @dev: device to open
  1028. *
  1029. * Takes a device from down to up state. The device's private open
  1030. * function is invoked and then the multicast lists are loaded. Finally
  1031. * the device is moved into the up state and a %NETDEV_UP message is
  1032. * sent to the netdev notifier chain.
  1033. *
  1034. * Calling this function on an active interface is a nop. On a failure
  1035. * a negative errno code is returned.
  1036. */
  1037. int dev_open(struct net_device *dev)
  1038. {
  1039. int ret;
  1040. if (dev->flags & IFF_UP)
  1041. return 0;
  1042. ret = __dev_open(dev);
  1043. if (ret < 0)
  1044. return ret;
  1045. rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
  1046. call_netdevice_notifiers(NETDEV_UP, dev);
  1047. return ret;
  1048. }
  1049. EXPORT_SYMBOL(dev_open);
  1050. static int __dev_close_many(struct list_head *head)
  1051. {
  1052. struct net_device *dev;
  1053. ASSERT_RTNL();
  1054. might_sleep();
  1055. list_for_each_entry(dev, head, unreg_list) {
  1056. call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
  1057. clear_bit(__LINK_STATE_START, &dev->state);
1058. /* Synchronize to scheduled poll. We cannot touch the poll list, it
1059. * may even be on a different cpu. So just clear netif_running().
1060. *
1061. * dev->stop() will invoke napi_disable() on all of its
  1062. * napi_struct instances on this device.
  1063. */
  1064. smp_mb__after_clear_bit(); /* Commit netif_running(). */
  1065. }
  1066. dev_deactivate_many(head);
  1067. list_for_each_entry(dev, head, unreg_list) {
  1068. const struct net_device_ops *ops = dev->netdev_ops;
  1069. /*
1070. * Call the device specific close. This cannot fail; it is
1071. * only called while the device is UP.
  1072. *
  1073. * We allow it to be called even after a DETACH hot-plug
  1074. * event.
  1075. */
  1076. if (ops->ndo_stop)
  1077. ops->ndo_stop(dev);
  1078. dev->flags &= ~IFF_UP;
  1079. net_dmaengine_put();
  1080. }
  1081. return 0;
  1082. }
  1083. static int __dev_close(struct net_device *dev)
  1084. {
  1085. int retval;
  1086. LIST_HEAD(single);
  1087. list_add(&dev->unreg_list, &single);
  1088. retval = __dev_close_many(&single);
  1089. list_del(&single);
  1090. return retval;
  1091. }
  1092. static int dev_close_many(struct list_head *head)
  1093. {
  1094. struct net_device *dev, *tmp;
  1095. LIST_HEAD(tmp_list);
  1096. list_for_each_entry_safe(dev, tmp, head, unreg_list)
  1097. if (!(dev->flags & IFF_UP))
  1098. list_move(&dev->unreg_list, &tmp_list);
  1099. __dev_close_many(head);
  1100. list_for_each_entry(dev, head, unreg_list) {
  1101. rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
  1102. call_netdevice_notifiers(NETDEV_DOWN, dev);
  1103. }
  1104. /* rollback_registered_many needs the complete original list */
  1105. list_splice(&tmp_list, head);
  1106. return 0;
  1107. }
  1108. /**
  1109. * dev_close - shutdown an interface.
  1110. * @dev: device to shutdown
  1111. *
  1112. * This function moves an active device into down state. A
  1113. * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
  1114. * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
  1115. * chain.
  1116. */
  1117. int dev_close(struct net_device *dev)
  1118. {
  1119. if (dev->flags & IFF_UP) {
  1120. LIST_HEAD(single);
  1121. list_add(&dev->unreg_list, &single);
  1122. dev_close_many(&single);
  1123. list_del(&single);
  1124. }
  1125. return 0;
  1126. }
  1127. EXPORT_SYMBOL(dev_close);
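/*
 * Editor's illustrative sketch (not part of dev.c): cycling an interface
 * from kernel code. dev_open() and dev_close() must run under RTNL; both
 * are nops if the device is already in the requested state.
 */
#if 0
static void example_cycle_interface(struct net *net, const char *name)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev) {
		dev_close(dev);
		dev_open(dev);
	}
	rtnl_unlock();
}
#endif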
  1128. /**
  1129. * dev_disable_lro - disable Large Receive Offload on a device
  1130. * @dev: device
  1131. *
  1132. * Disable Large Receive Offload (LRO) on a net device. Must be
  1133. * called under RTNL. This is needed if received packets may be
  1134. * forwarded to another interface.
  1135. */
  1136. void dev_disable_lro(struct net_device *dev)
  1137. {
  1138. u32 flags;
  1139. /*
  1140. * If we're trying to disable lro on a vlan device
  1141. * use the underlying physical device instead
  1142. */
  1143. if (is_vlan_dev(dev))
  1144. dev = vlan_dev_real_dev(dev);
  1145. if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
  1146. flags = dev->ethtool_ops->get_flags(dev);
  1147. else
  1148. flags = ethtool_op_get_flags(dev);
  1149. if (!(flags & ETH_FLAG_LRO))
  1150. return;
  1151. __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
  1152. if (unlikely(dev->features & NETIF_F_LRO))
  1153. netdev_WARN(dev, "failed to disable LRO!\n");
  1154. }
  1155. EXPORT_SYMBOL(dev_disable_lro);
  1156. static int dev_boot_phase = 1;
  1157. /**
  1158. * register_netdevice_notifier - register a network notifier block
  1159. * @nb: notifier
  1160. *
  1161. * Register a notifier to be called when network device events occur.
  1162. * The notifier passed is linked into the kernel structures and must
  1163. * not be reused until it has been unregistered. A negative errno code
  1164. * is returned on a failure.
  1165. *
1166. * When registered, all registration and up events are replayed
1167. * to the new notifier to allow it to have a race-free
1168. * view of the network device list.
  1169. */
  1170. int register_netdevice_notifier(struct notifier_block *nb)
  1171. {
  1172. struct net_device *dev;
  1173. struct net_device *last;
  1174. struct net *net;
  1175. int err;
  1176. rtnl_lock();
  1177. err = raw_notifier_chain_register(&netdev_chain, nb);
  1178. if (err)
  1179. goto unlock;
  1180. if (dev_boot_phase)
  1181. goto unlock;
  1182. for_each_net(net) {
  1183. for_each_netdev(net, dev) {
  1184. err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
  1185. err = notifier_to_errno(err);
  1186. if (err)
  1187. goto rollback;
  1188. if (!(dev->flags & IFF_UP))
  1189. continue;
  1190. nb->notifier_call(nb, NETDEV_UP, dev);
  1191. }
  1192. }
  1193. unlock:
  1194. rtnl_unlock();
  1195. return err;
  1196. rollback:
  1197. last = dev;
  1198. for_each_net(net) {
  1199. for_each_netdev(net, dev) {
  1200. if (dev == last)
  1201. break;
  1202. if (dev->flags & IFF_UP) {
  1203. nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
  1204. nb->notifier_call(nb, NETDEV_DOWN, dev);
  1205. }
  1206. nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
  1207. nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
  1208. }
  1209. }
  1210. raw_notifier_chain_unregister(&netdev_chain, nb);
  1211. goto unlock;
  1212. }
  1213. EXPORT_SYMBOL(register_netdevice_notifier);
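/*
 * Editor's illustrative sketch (not part of dev.c): a minimal netdevice
 * notifier. In this kernel the third argument passed to the callback is the
 * struct net_device itself. All example_* names are hypothetical.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* module init: register_netdevice_notifier(&example_netdev_nb);
 * module exit: unregister_netdevice_notifier(&example_netdev_nb);
 */
#endif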
  1214. /**
  1215. * unregister_netdevice_notifier - unregister a network notifier block
  1216. * @nb: notifier
  1217. *
  1218. * Unregister a notifier previously registered by
1219. * register_netdevice_notifier(). The notifier is unlinked from the
  1220. * kernel structures and may then be reused. A negative errno code
  1221. * is returned on a failure.
  1222. */
  1223. int unregister_netdevice_notifier(struct notifier_block *nb)
  1224. {
  1225. int err;
  1226. rtnl_lock();
  1227. err = raw_notifier_chain_unregister(&netdev_chain, nb);
  1228. rtnl_unlock();
  1229. return err;
  1230. }
  1231. EXPORT_SYMBOL(unregister_netdevice_notifier);
  1232. /**
  1233. * call_netdevice_notifiers - call all network notifier blocks
  1234. * @val: value passed unmodified to notifier function
  1235. * @dev: net_device pointer passed unmodified to notifier function
  1236. *
  1237. * Call all network notifier blocks. Parameters and return value
  1238. * are as for raw_notifier_call_chain().
  1239. */
  1240. int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
  1241. {
  1242. ASSERT_RTNL();
  1243. return raw_notifier_call_chain(&netdev_chain, val, dev);
  1244. }
  1245. EXPORT_SYMBOL(call_netdevice_notifiers);
  1246. /* When > 0 there are consumers of rx skb time stamps */
  1247. static atomic_t netstamp_needed = ATOMIC_INIT(0);
  1248. void net_enable_timestamp(void)
  1249. {
  1250. atomic_inc(&netstamp_needed);
  1251. }
  1252. EXPORT_SYMBOL(net_enable_timestamp);
  1253. void net_disable_timestamp(void)
  1254. {
  1255. atomic_dec(&netstamp_needed);
  1256. }
  1257. EXPORT_SYMBOL(net_disable_timestamp);
  1258. static inline void net_timestamp_set(struct sk_buff *skb)
  1259. {
  1260. if (atomic_read(&netstamp_needed))
  1261. __net_timestamp(skb);
  1262. else
  1263. skb->tstamp.tv64 = 0;
  1264. }
  1265. static inline void net_timestamp_check(struct sk_buff *skb)
  1266. {
  1267. if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
  1268. __net_timestamp(skb);
  1269. }
  1270. static int net_hwtstamp_validate(struct ifreq *ifr)
  1271. {
  1272. struct hwtstamp_config cfg;
  1273. enum hwtstamp_tx_types tx_type;
  1274. enum hwtstamp_rx_filters rx_filter;
  1275. int tx_type_valid = 0;
  1276. int rx_filter_valid = 0;
  1277. if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
  1278. return -EFAULT;
  1279. if (cfg.flags) /* reserved for future extensions */
  1280. return -EINVAL;
  1281. tx_type = cfg.tx_type;
  1282. rx_filter = cfg.rx_filter;
  1283. switch (tx_type) {
  1284. case HWTSTAMP_TX_OFF:
  1285. case HWTSTAMP_TX_ON:
  1286. case HWTSTAMP_TX_ONESTEP_SYNC:
  1287. tx_type_valid = 1;
  1288. break;
  1289. }
  1290. switch (rx_filter) {
  1291. case HWTSTAMP_FILTER_NONE:
  1292. case HWTSTAMP_FILTER_ALL:
  1293. case HWTSTAMP_FILTER_SOME:
  1294. case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
  1295. case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
  1296. case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
  1297. case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
  1298. case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
  1299. case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
  1300. case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
  1301. case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
  1302. case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
  1303. case HWTSTAMP_FILTER_PTP_V2_EVENT:
  1304. case HWTSTAMP_FILTER_PTP_V2_SYNC:
  1305. case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
  1306. rx_filter_valid = 1;
  1307. break;
  1308. }
  1309. if (!tx_type_valid || !rx_filter_valid)
  1310. return -ERANGE;
  1311. return 0;
  1312. }
  1313. static inline bool is_skb_forwardable(struct net_device *dev,
  1314. struct sk_buff *skb)
  1315. {
  1316. unsigned int len;
  1317. if (!(dev->flags & IFF_UP))
  1318. return false;
  1319. len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
  1320. if (skb->len <= len)
  1321. return true;
  1322. /* if TSO is enabled, we don't care about the length as the packet
  1323. * could be forwarded without being segmented before
  1324. */
  1325. if (skb_is_gso(skb))
  1326. return true;
  1327. return false;
  1328. }
  1329. /**
  1330. * dev_forward_skb - loopback an skb to another netif
  1331. *
  1332. * @dev: destination network device
  1333. * @skb: buffer to forward
  1334. *
  1335. * return values:
  1336. * NET_RX_SUCCESS (no congestion)
  1337. * NET_RX_DROP (packet was dropped, but freed)
  1338. *
  1339. * dev_forward_skb can be used for injecting an skb from the
  1340. * start_xmit function of one device into the receive queue
  1341. * of another device.
  1342. *
  1343. * The receiving device may be in another namespace, so
  1344. * we have to clear all information in the skb that could
  1345. * impact namespace isolation.
  1346. */
  1347. int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  1348. {
  1349. if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
  1350. if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
  1351. atomic_long_inc(&dev->rx_dropped);
  1352. kfree_skb(skb);
  1353. return NET_RX_DROP;
  1354. }
  1355. }
  1356. skb_orphan(skb);
  1357. nf_reset(skb);
  1358. if (unlikely(!is_skb_forwardable(dev, skb))) {
  1359. atomic_long_inc(&dev->rx_dropped);
  1360. kfree_skb(skb);
  1361. return NET_RX_DROP;
  1362. }
  1363. skb_set_dev(skb, dev);
  1364. skb->tstamp.tv64 = 0;
  1365. skb->pkt_type = PACKET_HOST;
  1366. skb->protocol = eth_type_trans(skb, dev);
  1367. return netif_rx(skb);
  1368. }
  1369. EXPORT_SYMBOL_GPL(dev_forward_skb);
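/*
 * Editor's illustrative sketch (not part of dev.c): a virtual device's
 * start_xmit handing the packet to its peer, in the spirit of drivers such
 * as veth. The peer lookup is hypothetical; dev_forward_skb() consumes the
 * skb in both the success and the drop case.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);	/* hypothetical */

	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
#endif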
  1370. static inline int deliver_skb(struct sk_buff *skb,
  1371. struct packet_type *pt_prev,
  1372. struct net_device *orig_dev)
  1373. {
  1374. atomic_inc(&skb->users);
  1375. return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  1376. }
  1377. /*
  1378. * Support routine. Sends outgoing frames to any network
  1379. * taps currently in use.
  1380. */
  1381. static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  1382. {
  1383. struct packet_type *ptype;
  1384. struct sk_buff *skb2 = NULL;
  1385. struct packet_type *pt_prev = NULL;
  1386. rcu_read_lock();
  1387. list_for_each_entry_rcu(ptype, &ptype_all, list) {
  1388. /* Never send packets back to the socket
  1389. * they originated from - MvS (miquels@drinkel.ow.org)
  1390. */
  1391. if ((ptype->dev == dev || !ptype->dev) &&
  1392. (ptype->af_packet_priv == NULL ||
  1393. (struct sock *)ptype->af_packet_priv != skb->sk)) {
  1394. if (pt_prev) {
  1395. deliver_skb(skb2, pt_prev, skb->dev);
  1396. pt_prev = ptype;
  1397. continue;
  1398. }
  1399. skb2 = skb_clone(skb, GFP_ATOMIC);
  1400. if (!skb2)
  1401. break;
  1402. net_timestamp_set(skb2);
  1403. /* skb->nh should be correctly
  1404. set by sender, so that the second statement is
  1405. just protection against buggy protocols.
  1406. */
  1407. skb_reset_mac_header(skb2);
  1408. if (skb_network_header(skb2) < skb2->data ||
  1409. skb2->network_header > skb2->tail) {
  1410. if (net_ratelimit())
  1411. printk(KERN_CRIT "protocol %04x is "
  1412. "buggy, dev %s\n",
  1413. ntohs(skb2->protocol),
  1414. dev->name);
  1415. skb_reset_network_header(skb2);
  1416. }
  1417. skb2->transport_header = skb2->network_header;
  1418. skb2->pkt_type = PACKET_OUTGOING;
  1419. pt_prev = ptype;
  1420. }
  1421. }
  1422. if (pt_prev)
  1423. pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
  1424. rcu_read_unlock();
  1425. }
  1426. /* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
  1427. * @dev: Network device
  1428. * @txq: number of queues available
  1429. *
1430. * If real_num_tx_queues is changed the tc mappings may no longer be
1431. * valid. To resolve this verify the tc mapping remains valid and if
1432. * not, zero the mapping. With no priorities mapping to this
1433. * offset/count pair it will no longer be used. In the worst case, if TC0
1434. * is invalid, nothing can be done, so disable priority mappings. It is
1435. * expected that drivers will fix this mapping if they can before
1436. * calling netif_set_real_num_tx_queues.
  1437. */
  1438. static void netif_setup_tc(struct net_device *dev, unsigned int txq)
  1439. {
  1440. int i;
  1441. struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
  1442. /* If TC0 is invalidated disable TC mapping */
  1443. if (tc->offset + tc->count > txq) {
  1444. pr_warning("Number of in use tx queues changed "
  1445. "invalidating tc mappings. Priority "
  1446. "traffic classification disabled!\n");
  1447. dev->num_tc = 0;
  1448. return;
  1449. }
  1450. /* Invalidated prio to tc mappings set to TC0 */
  1451. for (i = 1; i < TC_BITMASK + 1; i++) {
  1452. int q = netdev_get_prio_tc_map(dev, i);
  1453. tc = &dev->tc_to_txq[q];
  1454. if (tc->offset + tc->count > txq) {
  1455. pr_warning("Number of in use tx queues "
  1456. "changed. Priority %i to tc "
  1457. "mapping %i is no longer valid "
  1458. "setting map to 0\n",
  1459. i, q);
  1460. netdev_set_prio_tc_map(dev, i, 0);
  1461. }
  1462. }
  1463. }
  1464. /*
  1465. * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1466. * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
  1467. */
  1468. int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
  1469. {
  1470. int rc;
  1471. if (txq < 1 || txq > dev->num_tx_queues)
  1472. return -EINVAL;
  1473. if (dev->reg_state == NETREG_REGISTERED ||
  1474. dev->reg_state == NETREG_UNREGISTERING) {
  1475. ASSERT_RTNL();
  1476. rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
  1477. txq);
  1478. if (rc)
  1479. return rc;
  1480. if (dev->num_tc)
  1481. netif_setup_tc(dev, txq);
  1482. if (txq < dev->real_num_tx_queues)
  1483. qdisc_reset_all_tx_gt(dev, txq);
  1484. }
  1485. dev->real_num_tx_queues = txq;
  1486. return 0;
  1487. }
  1488. EXPORT_SYMBOL(netif_set_real_num_tx_queues);
  1489. #ifdef CONFIG_RPS
  1490. /**
  1491. * netif_set_real_num_rx_queues - set actual number of RX queues used
  1492. * @dev: Network device
  1493. * @rxq: Actual number of RX queues
  1494. *
  1495. * This must be called either with the rtnl_lock held or before
  1496. * registration of the net device. Returns 0 on success, or a
  1497. * negative error code. If called before registration, it always
  1498. * succeeds.
  1499. */
  1500. int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
  1501. {
  1502. int rc;
  1503. if (rxq < 1 || rxq > dev->num_rx_queues)
  1504. return -EINVAL;
  1505. if (dev->reg_state == NETREG_REGISTERED) {
  1506. ASSERT_RTNL();
  1507. rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
  1508. rxq);
  1509. if (rc)
  1510. return rc;
  1511. }
  1512. dev->real_num_rx_queues = rxq;
  1513. return 0;
  1514. }
  1515. EXPORT_SYMBOL(netif_set_real_num_rx_queues);
  1516. #endif
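/*
 * Editor's illustrative sketch (not part of dev.c): a multiqueue driver
 * trimming the advertised queue counts to what probing actually found,
 * either before registration or under RTNL afterwards. The counts must not
 * exceed the num_tx_queues/num_rx_queues given at allocation time; when RPS
 * is not configured, netdevice.h provides a stub for the rx variant.
 */
#if 0
static int example_setup_queues(struct net_device *dev,
				unsigned int found_tx, unsigned int found_rx)
{
	int err = netif_set_real_num_tx_queues(dev, found_tx);

	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, found_rx);
}
#endif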
  1517. static inline void __netif_reschedule(struct Qdisc *q)
  1518. {
  1519. struct softnet_data *sd;
  1520. unsigned long flags;
  1521. local_irq_save(flags);
  1522. sd = &__get_cpu_var(softnet_data);
  1523. q->next_sched = NULL;
  1524. *sd->output_queue_tailp = q;
  1525. sd->output_queue_tailp = &q->next_sched;
  1526. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  1527. local_irq_restore(flags);
  1528. }
  1529. void __netif_schedule(struct Qdisc *q)
  1530. {
  1531. if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
  1532. __netif_reschedule(q);
  1533. }
  1534. EXPORT_SYMBOL(__netif_schedule);
  1535. void dev_kfree_skb_irq(struct sk_buff *skb)
  1536. {
  1537. if (atomic_dec_and_test(&skb->users)) {
  1538. struct softnet_data *sd;
  1539. unsigned long flags;
  1540. local_irq_save(flags);
  1541. sd = &__get_cpu_var(softnet_data);
  1542. skb->next = sd->completion_queue;
  1543. sd->completion_queue = skb;
  1544. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  1545. local_irq_restore(flags);
  1546. }
  1547. }
  1548. EXPORT_SYMBOL(dev_kfree_skb_irq);
  1549. void dev_kfree_skb_any(struct sk_buff *skb)
  1550. {
  1551. if (in_irq() || irqs_disabled())
  1552. dev_kfree_skb_irq(skb);
  1553. else
  1554. dev_kfree_skb(skb);
  1555. }
  1556. EXPORT_SYMBOL(dev_kfree_skb_any);
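/*
 * Editor's illustrative sketch (not part of dev.c): freeing a transmitted
 * buffer from a completion handler that may run in hard IRQ context, which
 * is exactly the case dev_kfree_skb_any() handles. The function is
 * hypothetical.
 */
#if 0
static void example_tx_done(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb_any(skb);		/* defers to softirq when in IRQ context */
}
#endif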
  1557. /**
  1558. * netif_device_detach - mark device as removed
  1559. * @dev: network device
  1560. *
1561. * Mark device as removed from the system and therefore no longer available.
  1562. */
  1563. void netif_device_detach(struct net_device *dev)
  1564. {
  1565. if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
  1566. netif_running(dev)) {
  1567. netif_tx_stop_all_queues(dev);
  1568. }
  1569. }
  1570. EXPORT_SYMBOL(netif_device_detach);
  1571. /**
  1572. * netif_device_attach - mark device as attached
  1573. * @dev: network device
  1574. *
1575. * Mark device as attached to the system and restart if needed.
  1576. */
  1577. void netif_device_attach(struct net_device *dev)
  1578. {
  1579. if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
  1580. netif_running(dev)) {
  1581. netif_tx_wake_all_queues(dev);
  1582. __netdev_watchdog_up(dev);
  1583. }
  1584. }
  1585. EXPORT_SYMBOL(netif_device_attach);
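/*
 * Editor's illustrative sketch (not part of dev.c): the usual suspend/resume
 * pairing of netif_device_detach() and netif_device_attach(). The functions
 * and the elided hardware steps are hypothetical.
 */
#if 0
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops all tx queues if running */
	/* ... power the hardware down ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... power the hardware up ... */
	netif_device_attach(dev);	/* wakes queues and the watchdog if running */
	return 0;
}
#endif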
  1586. /**
1587. * skb_set_dev - assign a new device to a buffer
  1588. * @skb: buffer for the new device
  1589. * @dev: network device
  1590. *
  1591. * If an skb is owned by a device already, we have to reset
  1592. * all data private to the namespace a device belongs to
  1593. * before assigning it a new device.
  1594. */
  1595. #ifdef CONFIG_NET_NS
  1596. void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
  1597. {
  1598. skb_dst_drop(skb);
  1599. if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
  1600. secpath_reset(skb);
  1601. nf_reset(skb);
  1602. skb_init_secmark(skb);
  1603. skb->mark = 0;
  1604. skb->priority = 0;
  1605. skb->nf_trace = 0;
  1606. skb->ipvs_property = 0;
  1607. #ifdef CONFIG_NET_SCHED
  1608. skb->tc_index = 0;
  1609. #endif
  1610. }
  1611. skb->dev = dev;
  1612. }
  1613. EXPORT_SYMBOL(skb_set_dev);
  1614. #endif /* CONFIG_NET_NS */
  1615. /*
  1616. * Invalidate hardware checksum when packet is to be mangled, and
  1617. * complete checksum manually on outgoing path.
  1618. */
  1619. int skb_checksum_help(struct sk_buff *skb)
  1620. {
  1621. __wsum csum;
  1622. int ret = 0, offset;
  1623. if (skb->ip_summed == CHECKSUM_COMPLETE)
  1624. goto out_set_summed;
  1625. if (unlikely(skb_shinfo(skb)->gso_size)) {
  1626. /* Let GSO fix up the checksum. */
  1627. goto out_set_summed;
  1628. }
  1629. offset = skb_checksum_start_offset(skb);
  1630. BUG_ON(offset >= skb_headlen(skb));
  1631. csum = skb_checksum(skb, offset, skb->len - offset, 0);
  1632. offset += skb->csum_offset;
  1633. BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
  1634. if (skb_cloned(skb) &&
  1635. !skb_clone_writable(skb, offset + sizeof(__sum16))) {
  1636. ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  1637. if (ret)
  1638. goto out;
  1639. }
  1640. *(__sum16 *)(skb->data + offset) = csum_fold(csum);
  1641. out_set_summed:
  1642. skb->ip_summed = CHECKSUM_NONE;
  1643. out:
  1644. return ret;
  1645. }
  1646. EXPORT_SYMBOL(skb_checksum_help);
  1647. /**
  1648. * skb_gso_segment - Perform segmentation on skb.
  1649. * @skb: buffer to segment
  1650. * @features: features for the output path (see dev->features)
  1651. *
  1652. * This function segments the given skb and returns a list of segments.
  1653. *
  1654. * It may return NULL if the skb requires no segmentation. This is
  1655. * only possible when GSO is used for verifying header integrity.
  1656. */
  1657. struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
  1658. {
  1659. struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  1660. struct packet_type *ptype;
  1661. __be16 type = skb->protocol;
  1662. int vlan_depth = ETH_HLEN;
  1663. int err;
  1664. while (type == htons(ETH_P_8021Q)) {
  1665. struct vlan_hdr *vh;
  1666. if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
  1667. return ERR_PTR(-EINVAL);
  1668. vh = (struct vlan_hdr *)(skb->data + vlan_depth);
  1669. type = vh->h_vlan_encapsulated_proto;
  1670. vlan_depth += VLAN_HLEN;
  1671. }
  1672. skb_reset_mac_header(skb);
  1673. skb->mac_len = skb->network_header - skb->mac_header;
  1674. __skb_pull(skb, skb->mac_len);
  1675. if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
  1676. struct net_device *dev = skb->dev;
  1677. struct ethtool_drvinfo info = {};
  1678. if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
  1679. dev->ethtool_ops->get_drvinfo(dev, &info);
  1680. WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
  1681. info.driver, dev ? dev->features : 0L,
  1682. skb->sk ? skb->sk->sk_route_caps : 0L,
  1683. skb->len, skb->data_len, skb->ip_summed);
  1684. if (skb_header_cloned(skb) &&
  1685. (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
  1686. return ERR_PTR(err);
  1687. }
  1688. rcu_read_lock();
  1689. list_for_each_entry_rcu(ptype,
  1690. &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
  1691. if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
  1692. if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
  1693. err = ptype->gso_send_check(skb);
  1694. segs = ERR_PTR(err);
  1695. if (err || skb_gso_ok(skb, features))
  1696. break;
  1697. __skb_push(skb, (skb->data -
  1698. skb_network_header(skb)));
  1699. }
  1700. segs = ptype->gso_segment(skb, features);
  1701. break;
  1702. }
  1703. }
  1704. rcu_read_unlock();
  1705. __skb_push(skb, skb->data - skb_mac_header(skb));
  1706. return segs;
  1707. }
  1708. EXPORT_SYMBOL(skb_gso_segment);
  1709. /* Take action when hardware reception checksum errors are detected. */
  1710. #ifdef CONFIG_BUG
  1711. void netdev_rx_csum_fault(struct net_device *dev)
  1712. {
  1713. if (net_ratelimit()) {
  1714. printk(KERN_ERR "%s: hw csum failure.\n",
  1715. dev ? dev->name : "<unknown>");
  1716. dump_stack();
  1717. }
  1718. }
  1719. EXPORT_SYMBOL(netdev_rx_csum_fault);
  1720. #endif
1721. /* Actually, we should eliminate this check as soon as we know that:
1722. * 1. IOMMU is present and can map all the memory.
  1723. * 2. No high memory really exists on this machine.
  1724. */
  1725. static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
  1726. {
  1727. #ifdef CONFIG_HIGHMEM
  1728. int i;
  1729. if (!(dev->features & NETIF_F_HIGHDMA)) {
  1730. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1731. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1732. if (PageHighMem(skb_frag_page(frag)))
  1733. return 1;
  1734. }
  1735. }
  1736. if (PCI_DMA_BUS_IS_PHYS) {
  1737. struct device *pdev = dev->dev.parent;
  1738. if (!pdev)
  1739. return 0;
  1740. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1741. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1742. dma_addr_t addr = page_to_phys(skb_frag_page(frag));
  1743. if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
  1744. return 1;
  1745. }
  1746. }
  1747. #endif
  1748. return 0;
  1749. }
  1750. struct dev_gso_cb {
  1751. void (*destructor)(struct sk_buff *skb);
  1752. };
  1753. #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
  1754. static void dev_gso_skb_destructor(struct sk_buff *skb)
  1755. {
  1756. struct dev_gso_cb *cb;
  1757. do {
  1758. struct sk_buff *nskb = skb->next;
  1759. skb->next = nskb->next;
  1760. nskb->next = NULL;
  1761. kfree_skb(nskb);
  1762. } while (skb->next);
  1763. cb = DEV_GSO_CB(skb);
  1764. if (cb->destructor)
  1765. cb->destructor(skb);
  1766. }
  1767. /**
  1768. * dev_gso_segment - Perform emulated hardware segmentation on skb.
  1769. * @skb: buffer to segment
  1770. * @features: device features as applicable to this skb
  1771. *
  1772. * This function segments the given skb and stores the list of segments
  1773. * in skb->next.
  1774. */
  1775. static int dev_gso_segment(struct sk_buff *skb, int features)
  1776. {
  1777. struct sk_buff *segs;
  1778. segs = skb_gso_segment(skb, features);
  1779. /* Verifying header integrity only. */
  1780. if (!segs)
  1781. return 0;
  1782. if (IS_ERR(segs))
  1783. return PTR_ERR(segs);
  1784. skb->next = segs;
  1785. DEV_GSO_CB(skb)->destructor = skb->destructor;
  1786. skb->destructor = dev_gso_skb_destructor;
  1787. return 0;
  1788. }
  1789. /*
  1790. * Try to orphan skb early, right before transmission by the device.
  1791. * We cannot orphan skb if tx timestamp is requested or the sk-reference
  1792. * is needed on driver level for other reasons, e.g. see net/can/raw.c
  1793. */
  1794. static inline void skb_orphan_try(struct sk_buff *skb)
  1795. {
  1796. struct sock *sk = skb->sk;
  1797. if (sk && !skb_shinfo(skb)->tx_flags) {
1798. /* skb_tx_hash() won't be able to get sk.
  1799. * We copy sk_hash into skb->rxhash
  1800. */
  1801. if (!skb->rxhash)
  1802. skb->rxhash = sk->sk_hash;
  1803. skb_orphan(skb);
  1804. }
  1805. }
  1806. static bool can_checksum_protocol(unsigned long features, __be16 protocol)
  1807. {
  1808. return ((features & NETIF_F_GEN_CSUM) ||
  1809. ((features & NETIF_F_V4_CSUM) &&
  1810. protocol == htons(ETH_P_IP)) ||
  1811. ((features & NETIF_F_V6_CSUM) &&
  1812. protocol == htons(ETH_P_IPV6)) ||
  1813. ((features & NETIF_F_FCOE_CRC) &&
  1814. protocol == htons(ETH_P_FCOE)));
  1815. }
  1816. static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
  1817. {
  1818. if (!can_checksum_protocol(features, protocol)) {
  1819. features &= ~NETIF_F_ALL_CSUM;
  1820. features &= ~NETIF_F_SG;
  1821. } else if (illegal_highdma(skb->dev, skb)) {
  1822. features &= ~NETIF_F_SG;
  1823. }
  1824. return features;
  1825. }
  1826. u32 netif_skb_features(struct sk_buff *skb)
  1827. {
  1828. __be16 protocol = skb->protocol;
  1829. u32 features = skb->dev->features;
  1830. if (protocol == htons(ETH_P_8021Q)) {
  1831. struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
  1832. protocol = veh->h_vlan_encapsulated_proto;
  1833. } else if (!vlan_tx_tag_present(skb)) {
  1834. return harmonize_features(skb, protocol, features);
  1835. }
  1836. features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
  1837. if (protocol != htons(ETH_P_8021Q)) {
  1838. return harmonize_features(skb, protocol, features);
  1839. } else {
  1840. features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
  1841. NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
  1842. return harmonize_features(skb, protocol, features);
  1843. }
  1844. }
  1845. EXPORT_SYMBOL(netif_skb_features);
  1846. /*
  1847. * Returns true if either:
  1848. * 1. skb has frag_list and the device doesn't support FRAGLIST, or
  1849. * 2. skb is fragmented and the device does not support SG, or if
1850. * at least one of the fragments is in highmem and the device does not
  1851. * support DMA from it.
  1852. */
  1853. static inline int skb_needs_linearize(struct sk_buff *skb,
  1854. int features)
  1855. {
  1856. return skb_is_nonlinear(skb) &&
  1857. ((skb_has_frag_list(skb) &&
  1858. !(features & NETIF_F_FRAGLIST)) ||
  1859. (skb_shinfo(skb)->nr_frags &&
  1860. !(features & NETIF_F_SG)));
  1861. }
  1862. int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
  1863. struct netdev_queue *txq)
  1864. {
  1865. const struct net_device_ops *ops = dev->netdev_ops;
  1866. int rc = NETDEV_TX_OK;
  1867. unsigned int skb_len;
  1868. if (likely(!skb->next)) {
  1869. u32 features;
  1870. /*
  1871. * If device doesn't need skb->dst, release it right now while
1872. * it's hot in this cpu cache
  1873. */
  1874. if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
  1875. skb_dst_drop(skb);
  1876. if (!list_empty(&ptype_all))
  1877. dev_queue_xmit_nit(skb, dev);
  1878. skb_orphan_try(skb);
  1879. features = netif_skb_features(skb);
  1880. if (vlan_tx_tag_present(skb) &&
  1881. !(features & NETIF_F_HW_VLAN_TX)) {
  1882. skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
  1883. if (unlikely(!skb))
  1884. goto out;
  1885. skb->vlan_tci = 0;
  1886. }
  1887. if (netif_needs_gso(skb, features)) {
  1888. if (unlikely(dev_gso_segment(skb, features)))
  1889. goto out_kfree_skb;
  1890. if (skb->next)
  1891. goto gso;
  1892. } else {
  1893. if (skb_needs_linearize(skb, features) &&
  1894. __skb_linearize(skb))
  1895. goto out_kfree_skb;
  1896. /* If packet is not checksummed and device does not
  1897. * support checksumming for this protocol, complete
  1898. * checksumming here.
  1899. */
  1900. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  1901. skb_set_transport_header(skb,
  1902. skb_checksum_start_offset(skb));
  1903. if (!(features & NETIF_F_ALL_CSUM) &&
  1904. skb_checksum_help(skb))
  1905. goto out_kfree_skb;
  1906. }
  1907. }
  1908. skb_len = skb->len;
  1909. rc = ops->ndo_start_xmit(skb, dev);
  1910. trace_net_dev_xmit(skb, rc, dev, skb_len);
  1911. if (rc == NETDEV_TX_OK)
  1912. txq_trans_update(txq);
  1913. return rc;
  1914. }
  1915. gso:
  1916. do {
  1917. struct sk_buff *nskb = skb->next;
  1918. skb->next = nskb->next;
  1919. nskb->next = NULL;
  1920. /*
  1921. * If device doesn't need nskb->dst, release it right now while
1922. * it's hot in this cpu cache
  1923. */
  1924. if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
  1925. skb_dst_drop(nskb);
  1926. skb_len = nskb->len;
  1927. rc = ops->ndo_start_xmit(nskb, dev);
  1928. trace_net_dev_xmit(nskb, rc, dev, skb_len);
  1929. if (unlikely(rc != NETDEV_TX_OK)) {
  1930. if (rc & ~NETDEV_TX_MASK)
  1931. goto out_kfree_gso_skb;
  1932. nskb->next = skb->next;
  1933. skb->next = nskb;
  1934. return rc;
  1935. }
  1936. txq_trans_update(txq);
  1937. if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
  1938. return NETDEV_TX_BUSY;
  1939. } while (skb->next);
  1940. out_kfree_gso_skb:
  1941. if (likely(skb->next == NULL))
  1942. skb->destructor = DEV_GSO_CB(skb)->destructor;
  1943. out_kfree_skb:
  1944. kfree_skb(skb);
  1945. out:
  1946. return rc;
  1947. }
  1948. static u32 hashrnd __read_mostly;
  1949. /*
1950. * Returns a Tx hash based on the given packet descriptor and the number of
1951. * Tx queues to be used as a distribution range.
  1952. */
  1953. u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
  1954. unsigned int num_tx_queues)
  1955. {
  1956. u32 hash;
  1957. u16 qoffset = 0;
  1958. u16 qcount = num_tx_queues;
  1959. if (skb_rx_queue_recorded(skb)) {
  1960. hash = skb_get_rx_queue(skb);
  1961. while (unlikely(hash >= num_tx_queues))
  1962. hash -= num_tx_queues;
  1963. return hash;
  1964. }
  1965. if (dev->num_tc) {
  1966. u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
  1967. qoffset = dev->tc_to_txq[tc].offset;
  1968. qcount = dev->tc_to_txq[tc].count;
  1969. }
  1970. if (skb->sk && skb->sk->sk_hash)
  1971. hash = skb->sk->sk_hash;
  1972. else
  1973. hash = (__force u16) skb->protocol ^ skb->rxhash;
  1974. hash = jhash_1word(hash, hashrnd);
  1975. return (u16) (((u64) hash * qcount) >> 32) + qoffset;
  1976. }
  1977. EXPORT_SYMBOL(__skb_tx_hash);
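/*
 * Editor's illustrative sketch (not part of dev.c): an ndo_select_queue
 * implementation that simply falls back to the stack's default spreading via
 * skb_tx_hash(), which wraps __skb_tx_hash() with real_num_tx_queues. The
 * function name is hypothetical.
 */
#if 0
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}
#endif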
  1978. static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
  1979. {
  1980. if (unlikely(queue_index >= dev->real_num_tx_queues)) {
  1981. if (net_ratelimit()) {
  1982. pr_warning("%s selects TX queue %d, but "
  1983. "real number of TX queues is %d\n",
  1984. dev->name, queue_index, dev->real_num_tx_queues);
  1985. }
  1986. return 0;
  1987. }
  1988. return queue_index;
  1989. }
  1990. static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
  1991. {
  1992. #ifdef CONFIG_XPS
  1993. struct xps_dev_maps *dev_maps;
  1994. struct xps_map *map;
  1995. int queue_index = -1;
  1996. rcu_read_lock();
  1997. dev_maps = rcu_dereference(dev->xps_maps);
  1998. if (dev_maps) {
  1999. map = rcu_dereference(
  2000. dev_maps->cpu_map[raw_smp_processor_id()]);
  2001. if (map) {
  2002. if (map->len == 1)
  2003. queue_index = map->queues[0];
  2004. else {
  2005. u32 hash;
  2006. if (skb->sk && skb->sk->sk_hash)
  2007. hash = skb->sk->sk_hash;
  2008. else
  2009. hash = (__force u16) skb->protocol ^
  2010. skb->rxhash;
  2011. hash = jhash_1word(hash, hashrnd);
  2012. queue_index = map->queues[
  2013. ((u64)hash * map->len) >> 32];
  2014. }
  2015. if (unlikely(queue_index >= dev->real_num_tx_queues))
  2016. queue_index = -1;
  2017. }
  2018. }
  2019. rcu_read_unlock();
  2020. return queue_index;
  2021. #else
  2022. return -1;
  2023. #endif
  2024. }
  2025. static struct netdev_queue *dev_pick_tx(struct net_device *dev,
  2026. struct sk_buff *skb)
  2027. {
  2028. int queue_index;
  2029. const struct net_device_ops *ops = dev->netdev_ops;
  2030. if (dev->real_num_tx_queues == 1)
  2031. queue_index = 0;
  2032. else if (ops->ndo_select_queue) {
  2033. queue_index = ops->ndo_select_queue(dev, skb);
  2034. queue_index = dev_cap_txqueue(dev, queue_index);
  2035. } else {
  2036. struct sock *sk = skb->sk;
  2037. queue_index = sk_tx_queue_get(sk);
  2038. if (queue_index < 0 || skb->ooo_okay ||
  2039. queue_index >= dev->real_num_tx_queues) {
  2040. int old_index = queue_index;
  2041. queue_index = get_xps_queue(dev, skb);
  2042. if (queue_index < 0)
  2043. queue_index = skb_tx_hash(dev, skb);
  2044. if (queue_index != old_index && sk) {
  2045. struct dst_entry *dst =
  2046. rcu_dereference_check(sk->sk_dst_cache, 1);
  2047. if (dst && skb_dst(skb) == dst)
  2048. sk_tx_queue_set(sk, queue_index);
  2049. }
  2050. }
  2051. }
  2052. skb_set_queue_mapping(skb, queue_index);
  2053. return netdev_get_tx_queue(dev, queue_index);
  2054. }
  2055. static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
  2056. struct net_device *dev,
  2057. struct netdev_queue *txq)
  2058. {
  2059. spinlock_t *root_lock = qdisc_lock(q);
  2060. bool contended;
  2061. int rc;
  2062. qdisc_skb_cb(skb)->pkt_len = skb->len;
  2063. qdisc_calculate_pkt_len(skb, q);
  2064. /*
  2065. * Heuristic to force contended enqueues to serialize on a
  2066. * separate lock before trying to get qdisc main lock.
  2067. * This permits __QDISC_STATE_RUNNING owner to get the lock more often
  2068. * and dequeue packets faster.
  2069. */
  2070. contended = qdisc_is_running(q);
  2071. if (unlikely(contended))
  2072. spin_lock(&q->busylock);
  2073. spin_lock(root_lock);
  2074. if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
  2075. kfree_skb(skb);
  2076. rc = NET_XMIT_DROP;
  2077. } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
  2078. qdisc_run_begin(q)) {
  2079. /*
  2080. * This is a work-conserving queue; there are no old skbs
  2081. * waiting to be sent out; and the qdisc is not running -
  2082. * xmit the skb directly.
  2083. */
  2084. if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
  2085. skb_dst_force(skb);
  2086. qdisc_bstats_update(q, skb);
  2087. if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
  2088. if (unlikely(contended)) {
  2089. spin_unlock(&q->busylock);
  2090. contended = false;
  2091. }
  2092. __qdisc_run(q);
  2093. } else
  2094. qdisc_run_end(q);
  2095. rc = NET_XMIT_SUCCESS;
  2096. } else {
  2097. skb_dst_force(skb);
  2098. rc = q->enqueue(skb, q) & NET_XMIT_MASK;
  2099. if (qdisc_run_begin(q)) {
  2100. if (unlikely(contended)) {
  2101. spin_unlock(&q->busylock);
  2102. contended = false;
  2103. }
  2104. __qdisc_run(q);
  2105. }
  2106. }
  2107. spin_unlock(root_lock);
  2108. if (unlikely(contended))
  2109. spin_unlock(&q->busylock);
  2110. return rc;
  2111. }
  2112. static DEFINE_PER_CPU(int, xmit_recursion);
  2113. #define RECURSION_LIMIT 10
  2114. /**
  2115. * dev_queue_xmit - transmit a buffer
  2116. * @skb: buffer to transmit
  2117. *
  2118. * Queue a buffer for transmission to a network device. The caller must
  2119. * have set the device and priority and built the buffer before calling
  2120. * this function. The function can be called from an interrupt.
  2121. *
  2122. * A negative errno code is returned on a failure. A success does not
  2123. * guarantee the frame will be transmitted as it may be dropped due
  2124. * to congestion or traffic shaping.
  2125. *
  2126. * -----------------------------------------------------------------------------------
  2127. * I notice this method can also return errors from the queue disciplines,
  2128. * including NET_XMIT_DROP, which is a positive value. So, errors can also
  2129. * be positive.
  2130. *
  2131. * Regardless of the return value, the skb is consumed, so it is currently
  2132. * difficult to retry a send to this method. (You can bump the ref count
  2133. * before sending to hold a reference for retry if you are careful.)
  2134. *
  2135. * When calling this method, interrupts MUST be enabled. This is because
  2136. * the BH enable code must have IRQs enabled so that it will not deadlock.
  2137. * --BLG
  2138. */
  2139. int dev_queue_xmit(struct sk_buff *skb)
  2140. {
  2141. struct net_device *dev = skb->dev;
  2142. struct netdev_queue *txq;
  2143. struct Qdisc *q;
  2144. int rc = -ENOMEM;
  2145. /* Disable soft irqs for various locks below. Also
  2146. * stops preemption for RCU.
  2147. */
  2148. rcu_read_lock_bh();
  2149. txq = dev_pick_tx(dev, skb);
  2150. q = rcu_dereference_bh(txq->qdisc);
  2151. #ifdef CONFIG_NET_CLS_ACT
  2152. skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
  2153. #endif
  2154. trace_net_dev_queue(skb);
  2155. if (q->enqueue) {
  2156. rc = __dev_xmit_skb(skb, q, dev, txq);
  2157. goto out;
  2158. }
  2159. /* The device has no queue. Common case for software devices:
2160. loopback, all sorts of tunnels...
2161. Really, it is unlikely that netif_tx_lock protection is necessary
2162. here. (e.g. loopback and IP tunnels are clean ignoring statistics
2163. counters.)
2164. However, it is possible that they rely on the protection
2165. made by us here.
2166. Check this and take the lock. It is not prone to deadlocks.
2167. Or shoot the noqueue qdisc, it is even simpler 8)
  2168. */
  2169. if (dev->flags & IFF_UP) {
  2170. int cpu = smp_processor_id(); /* ok because BHs are off */
  2171. if (txq->xmit_lock_owner != cpu) {
  2172. if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
  2173. goto recursion_alert;
  2174. HARD_TX_LOCK(dev, txq, cpu);
  2175. if (!netif_tx_queue_stopped(txq)) {
  2176. __this_cpu_inc(xmit_recursion);
  2177. rc = dev_hard_start_xmit(skb, dev, txq);
  2178. __this_cpu_dec(xmit_recursion);
  2179. if (dev_xmit_complete(rc)) {
  2180. HARD_TX_UNLOCK(dev, txq);
  2181. goto out;
  2182. }
  2183. }
  2184. HARD_TX_UNLOCK(dev, txq);
  2185. if (net_ratelimit())
  2186. printk(KERN_CRIT "Virtual device %s asks to "
  2187. "queue packet!\n", dev->name);
  2188. } else {
  2189. /* Recursion is detected! It is possible,
  2190. * unfortunately
  2191. */
  2192. recursion_alert:
  2193. if (net_ratelimit())
  2194. printk(KERN_CRIT "Dead loop on virtual device "
  2195. "%s, fix it urgently!\n", dev->name);
  2196. }
  2197. }
  2198. rc = -ENETDOWN;
  2199. rcu_read_unlock_bh();
  2200. kfree_skb(skb);
  2201. return rc;
  2202. out:
  2203. rcu_read_unlock_bh();
  2204. return rc;
  2205. }
  2206. EXPORT_SYMBOL(dev_queue_xmit);
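/*
 * Editor's illustrative sketch (not part of dev.c): transmitting a locally
 * built packet. Link-layer header construction is elided and the protocol
 * value is hypothetical; note that the skb is consumed whatever the return
 * value, and interrupts must be enabled when calling dev_queue_xmit().
 */
#if 0
static int example_send(struct net_device *dev, const void *data,
			unsigned int len)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), data, len);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);	/* hypothetical payload type */
	return dev_queue_xmit(skb);
}
#endif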
  2207. /*=======================================================================
  2208. Receiver routines
  2209. =======================================================================*/
  2210. int netdev_max_backlog __read_mostly = 1000;
  2211. int netdev_tstamp_prequeue __read_mostly = 1;
  2212. int netdev_budget __read_mostly = 300;
  2213. int weight_p __read_mostly = 64; /* old backlog weight */
  2214. /* Called with irq disabled */
  2215. static inline void ____napi_schedule(struct softnet_data *sd,
  2216. struct napi_struct *napi)
  2217. {
  2218. list_add_tail(&napi->poll_list, &sd->poll_list);
  2219. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  2220. }
  2221. /*
  2222. * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
  2223. * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2224. * on success; zero indicates no valid hash. Also, sets l4_rxhash in skb
  2225. * if hash is a canonical 4-tuple hash over transport ports.
  2226. */
  2227. void __skb_get_rxhash(struct sk_buff *skb)
  2228. {
  2229. int nhoff, hash = 0, poff;
  2230. const struct ipv6hdr *ip6;
  2231. const struct iphdr *ip;
  2232. const struct vlan_hdr *vlan;
  2233. u8 ip_proto;
  2234. u32 addr1, addr2;
  2235. u16 proto;
  2236. union {
  2237. u32 v32;
  2238. u16 v16[2];
  2239. } ports;
  2240. nhoff = skb_network_offset(skb);
  2241. proto = skb->protocol;
  2242. again:
  2243. switch (proto) {
  2244. case __constant_htons(ETH_P_IP):
  2245. ip:
  2246. if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
  2247. goto done;
  2248. ip = (const struct iphdr *) (skb->data + nhoff);
  2249. if (ip_is_fragment(ip))
  2250. ip_proto = 0;
  2251. else
  2252. ip_proto = ip->protocol;
  2253. addr1 = (__force u32) ip->saddr;
  2254. addr2 = (__force u32) ip->daddr;
  2255. nhoff += ip->ihl * 4;
  2256. break;
  2257. case __constant_htons(ETH_P_IPV6):
  2258. ipv6:
  2259. if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
  2260. goto done;
  2261. ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
  2262. ip_proto = ip6->nexthdr;
  2263. addr1 = (__force u32) ip6->saddr.s6_addr32[3];
  2264. addr2 = (__force u32) ip6->daddr.s6_addr32[3];
  2265. nhoff += 40;
  2266. break;
  2267. case __constant_htons(ETH_P_8021Q):
  2268. if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
  2269. goto done;
  2270. vlan = (const struct vlan_hdr *) (skb->data + nhoff);
  2271. proto = vlan->h_vlan_encapsulated_proto;
  2272. nhoff += sizeof(*vlan);
  2273. goto again;
  2274. case __constant_htons(ETH_P_PPP_SES):
  2275. if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
  2276. goto done;
  2277. proto = *((__be16 *) (skb->data + nhoff +
  2278. sizeof(struct pppoe_hdr)));
  2279. nhoff += PPPOE_SES_HLEN;
  2280. switch (proto) {
  2281. case __constant_htons(PPP_IP):
  2282. goto ip;
  2283. case __constant_htons(PPP_IPV6):
  2284. goto ipv6;
  2285. default:
  2286. goto done;
  2287. }
  2288. default:
  2289. goto done;
  2290. }
  2291. switch (ip_proto) {
  2292. case IPPROTO_GRE:
  2293. if (pskb_may_pull(skb, nhoff + 16)) {
  2294. u8 *h = skb->data + nhoff;
  2295. __be16 flags = *(__be16 *)h;
  2296. /*
  2297. * Only look inside GRE if version zero and no
  2298. * routing
  2299. */
  2300. if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
  2301. proto = *(__be16 *)(h + 2);
  2302. nhoff += 4;
  2303. if (flags & GRE_CSUM)
  2304. nhoff += 4;
  2305. if (flags & GRE_KEY)
  2306. nhoff += 4;
  2307. if (flags & GRE_SEQ)
  2308. nhoff += 4;
  2309. goto again;
  2310. }
  2311. }
  2312. break;
  2313. case IPPROTO_IPIP:
  2314. goto again;
  2315. default:
  2316. break;
  2317. }
  2318. ports.v32 = 0;
  2319. poff = proto_ports_offset(ip_proto);
  2320. if (poff >= 0) {
  2321. nhoff += poff;
  2322. if (pskb_may_pull(skb, nhoff + 4)) {
  2323. ports.v32 = * (__force u32 *) (skb->data + nhoff);
  2324. if (ports.v16[1] < ports.v16[0])
  2325. swap(ports.v16[0], ports.v16[1]);
  2326. skb->l4_rxhash = 1;
  2327. }
  2328. }
  2329. /* get a consistent hash (same value on both flow directions) */
  2330. if (addr2 < addr1)
  2331. swap(addr1, addr2);
  2332. hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
  2333. if (!hash)
  2334. hash = 1;
  2335. done:
  2336. skb->rxhash = hash;
  2337. }
  2338. EXPORT_SYMBOL(__skb_get_rxhash);
  2339. #ifdef CONFIG_RPS
  2340. /* One global table that all flow-based protocols share. */
  2341. struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
  2342. EXPORT_SYMBOL(rps_sock_flow_table);
  2343. static struct rps_dev_flow *
  2344. set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  2345. struct rps_dev_flow *rflow, u16 next_cpu)
  2346. {
  2347. if (next_cpu != RPS_NO_CPU) {
  2348. #ifdef CONFIG_RFS_ACCEL
  2349. struct netdev_rx_queue *rxqueue;
  2350. struct rps_dev_flow_table *flow_table;
  2351. struct rps_dev_flow *old_rflow;
  2352. u32 flow_id;
  2353. u16 rxq_index;
  2354. int rc;
  2355. /* Should we steer this flow to a different hardware queue? */
  2356. if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
  2357. !(dev->features & NETIF_F_NTUPLE))
  2358. goto out;
  2359. rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
  2360. if (rxq_index == skb_get_rx_queue(skb))
  2361. goto out;
  2362. rxqueue = dev->_rx + rxq_index;
  2363. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  2364. if (!flow_table)
  2365. goto out;
  2366. flow_id = skb->rxhash & flow_table->mask;
  2367. rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
  2368. rxq_index, flow_id);
  2369. if (rc < 0)
  2370. goto out;
  2371. old_rflow = rflow;
  2372. rflow = &flow_table->flows[flow_id];
  2373. rflow->filter = rc;
  2374. if (old_rflow->filter == rflow->filter)
  2375. old_rflow->filter = RPS_NO_FILTER;
  2376. out:
  2377. #endif
  2378. rflow->last_qtail =
  2379. per_cpu(softnet_data, next_cpu).input_queue_head;
  2380. }
  2381. rflow->cpu = next_cpu;
  2382. return rflow;
  2383. }
  2384. /*
  2385. * get_rps_cpu is called from netif_receive_skb and returns the target
  2386. * CPU from the RPS map of the receiving queue for a given skb.
  2387. * rcu_read_lock must be held on entry.
  2388. */
  2389. static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  2390. struct rps_dev_flow **rflowp)
  2391. {
  2392. struct netdev_rx_queue *rxqueue;
  2393. struct rps_map *map;
  2394. struct rps_dev_flow_table *flow_table;
  2395. struct rps_sock_flow_table *sock_flow_table;
  2396. int cpu = -1;
  2397. u16 tcpu;
  2398. if (skb_rx_queue_recorded(skb)) {
  2399. u16 index = skb_get_rx_queue(skb);
  2400. if (unlikely(index >= dev->real_num_rx_queues)) {
  2401. WARN_ONCE(dev->real_num_rx_queues > 1,
  2402. "%s received packet on queue %u, but number "
  2403. "of RX queues is %u\n",
  2404. dev->name, index, dev->real_num_rx_queues);
  2405. goto done;
  2406. }
  2407. rxqueue = dev->_rx + index;
  2408. } else
  2409. rxqueue = dev->_rx;
  2410. map = rcu_dereference(rxqueue->rps_map);
  2411. if (map) {
  2412. if (map->len == 1 &&
  2413. !rcu_access_pointer(rxqueue->rps_flow_table)) {
  2414. tcpu = map->cpus[0];
  2415. if (cpu_online(tcpu))
  2416. cpu = tcpu;
  2417. goto done;
  2418. }
  2419. } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
  2420. goto done;
  2421. }
  2422. skb_reset_network_header(skb);
  2423. if (!skb_get_rxhash(skb))
  2424. goto done;
  2425. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  2426. sock_flow_table = rcu_dereference(rps_sock_flow_table);
  2427. if (flow_table && sock_flow_table) {
  2428. u16 next_cpu;
  2429. struct rps_dev_flow *rflow;
  2430. rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
  2431. tcpu = rflow->cpu;
  2432. next_cpu = sock_flow_table->ents[skb->rxhash &
  2433. sock_flow_table->mask];
  2434. /*
  2435. * If the desired CPU (where last recvmsg was done) is
  2436. * different from current CPU (one in the rx-queue flow
  2437. * table entry), switch if one of the following holds:
  2438. * - Current CPU is unset (equal to RPS_NO_CPU).
  2439. * - Current CPU is offline.
  2440. * - The current CPU's queue tail has advanced beyond the
  2441. * last packet that was enqueued using this table entry.
  2442. * This guarantees that all previous packets for the flow
2443. * have been dequeued, thus preserving in-order delivery.
  2444. */
  2445. if (unlikely(tcpu != next_cpu) &&
  2446. (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
  2447. ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
  2448. rflow->last_qtail)) >= 0))
  2449. rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
  2450. if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
  2451. *rflowp = rflow;
  2452. cpu = tcpu;
  2453. goto done;
  2454. }
  2455. }
  2456. if (map) {
  2457. tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
  2458. if (cpu_online(tcpu)) {
  2459. cpu = tcpu;
  2460. goto done;
  2461. }
  2462. }
  2463. done:
  2464. return cpu;
  2465. }
  2466. #ifdef CONFIG_RFS_ACCEL
  2467. /**
  2468. * rps_may_expire_flow - check whether an RFS hardware filter may be removed
  2469. * @dev: Device on which the filter was set
  2470. * @rxq_index: RX queue index
  2471. * @flow_id: Flow ID passed to ndo_rx_flow_steer()
  2472. * @filter_id: Filter ID returned by ndo_rx_flow_steer()
  2473. *
  2474. * Drivers that implement ndo_rx_flow_steer() should periodically call
  2475. * this function for each installed filter and remove the filters for
  2476. * which it returns %true.
  2477. */
  2478. bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
  2479. u32 flow_id, u16 filter_id)
  2480. {
  2481. struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
  2482. struct rps_dev_flow_table *flow_table;
  2483. struct rps_dev_flow *rflow;
  2484. bool expire = true;
  2485. int cpu;
  2486. rcu_read_lock();
  2487. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  2488. if (flow_table && flow_id <= flow_table->mask) {
  2489. rflow = &flow_table->flows[flow_id];
  2490. cpu = ACCESS_ONCE(rflow->cpu);
  2491. if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
  2492. ((int)(per_cpu(softnet_data, cpu).input_queue_head -
  2493. rflow->last_qtail) <
  2494. (int)(10 * flow_table->mask)))
  2495. expire = false;
  2496. }
  2497. rcu_read_unlock();
  2498. return expire;
  2499. }
  2500. EXPORT_SYMBOL(rps_may_expire_flow);
  2501. #endif /* CONFIG_RFS_ACCEL */
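/*
 * Editor's note -- illustrative sketch only, not part of dev.c. A driver
 * that installs hardware steering filters from ndo_rx_flow_steer() is
 * expected to scan them periodically and remove the ones that
 * rps_may_expire_flow() reports as expirable. The filter bookkeeping
 * below (struct example_rfs_filter and the "remove from hardware" step)
 * is hypothetical.
 */
#ifdef CONFIG_RFS_ACCEL
struct example_rfs_filter {
        bool installed;
        u16  rxq_index;
        u32  flow_id;
};

static void example_expire_rfs_filters(struct net_device *dev,
                                       struct example_rfs_filter *filters,
                                       u16 n_filters)
{
        u16 i;

        for (i = 0; i < n_filters; i++) {
                if (!filters[i].installed)
                        continue;
                if (rps_may_expire_flow(dev, filters[i].rxq_index,
                                        filters[i].flow_id, i)) {
                        /* tell the hardware to drop the filter here, then forget it */
                        filters[i].installed = false;
                }
        }
}
#endif /* CONFIG_RFS_ACCEL */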
  2502. /* Called from hardirq (IPI) context */
  2503. static void rps_trigger_softirq(void *data)
  2504. {
  2505. struct softnet_data *sd = data;
  2506. ____napi_schedule(sd, &sd->backlog);
  2507. sd->received_rps++;
  2508. }
  2509. #endif /* CONFIG_RPS */
  2510. /*
2511. * Check if this softnet_data structure belongs to another CPU.
2512. * If yes, queue it to our IPI list and return 1.
2513. * If no, return 0.
  2514. */
  2515. static int rps_ipi_queued(struct softnet_data *sd)
  2516. {
  2517. #ifdef CONFIG_RPS
  2518. struct softnet_data *mysd = &__get_cpu_var(softnet_data);
  2519. if (sd != mysd) {
  2520. sd->rps_ipi_next = mysd->rps_ipi_list;
  2521. mysd->rps_ipi_list = sd;
  2522. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  2523. return 1;
  2524. }
  2525. #endif /* CONFIG_RPS */
  2526. return 0;
  2527. }
  2528. /*
2529. * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
  2530. * queue (may be a remote CPU queue).
  2531. */
  2532. static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  2533. unsigned int *qtail)
  2534. {
  2535. struct softnet_data *sd;
  2536. unsigned long flags;
  2537. sd = &per_cpu(softnet_data, cpu);
  2538. local_irq_save(flags);
  2539. rps_lock(sd);
  2540. if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
  2541. if (skb_queue_len(&sd->input_pkt_queue)) {
  2542. enqueue:
  2543. __skb_queue_tail(&sd->input_pkt_queue, skb);
  2544. input_queue_tail_incr_save(sd, qtail);
  2545. rps_unlock(sd);
  2546. local_irq_restore(flags);
  2547. return NET_RX_SUCCESS;
  2548. }
2549. /* Schedule NAPI for the backlog device.
2550. * We can use a non-atomic operation since we own the queue lock.
  2551. */
  2552. if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
  2553. if (!rps_ipi_queued(sd))
  2554. ____napi_schedule(sd, &sd->backlog);
  2555. }
  2556. goto enqueue;
  2557. }
  2558. sd->dropped++;
  2559. rps_unlock(sd);
  2560. local_irq_restore(flags);
  2561. atomic_long_inc(&skb->dev->rx_dropped);
  2562. kfree_skb(skb);
  2563. return NET_RX_DROP;
  2564. }
  2565. /**
  2566. * netif_rx - post buffer to the network code
  2567. * @skb: buffer to post
  2568. *
  2569. * This function receives a packet from a device driver and queues it for
  2570. * the upper (protocol) levels to process. It always succeeds. The buffer
  2571. * may be dropped during processing for congestion control or by the
  2572. * protocol layers.
  2573. *
  2574. * return values:
  2575. * NET_RX_SUCCESS (no congestion)
  2576. * NET_RX_DROP (packet was dropped)
  2577. *
  2578. */
  2579. int netif_rx(struct sk_buff *skb)
  2580. {
  2581. int ret;
  2582. /* if netpoll wants it, pretend we never saw it */
  2583. if (netpoll_rx(skb))
  2584. return NET_RX_DROP;
  2585. if (netdev_tstamp_prequeue)
  2586. net_timestamp_check(skb);
  2587. trace_netif_rx(skb);
  2588. #ifdef CONFIG_RPS
  2589. {
  2590. struct rps_dev_flow voidflow, *rflow = &voidflow;
  2591. int cpu;
  2592. preempt_disable();
  2593. rcu_read_lock();
  2594. cpu = get_rps_cpu(skb->dev, skb, &rflow);
  2595. if (cpu < 0)
  2596. cpu = smp_processor_id();
  2597. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  2598. rcu_read_unlock();
  2599. preempt_enable();
  2600. }
  2601. #else
  2602. {
  2603. unsigned int qtail;
  2604. ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
  2605. put_cpu();
  2606. }
  2607. #endif
  2608. return ret;
  2609. }
  2610. EXPORT_SYMBOL(netif_rx);
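/*
 * Editor's note -- illustrative sketch only, not part of dev.c. The classic
 * non-NAPI receive path: a driver copies the frame into a freshly allocated
 * skb from its interrupt handler and hands it to netif_rx(), which only
 * queues it on the per-CPU backlog. The device and frame-copy details below
 * are hypothetical.
 */
static void example_legacy_rx(struct net_device *dev, const void *frame,
                              unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

        if (unlikely(!skb)) {
                dev->stats.rx_dropped++;
                return;
        }
        memcpy(skb_put(skb, len), frame, len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);  /* queues to the backlog, never blocks */
}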
  2611. int netif_rx_ni(struct sk_buff *skb)
  2612. {
  2613. int err;
  2614. preempt_disable();
  2615. err = netif_rx(skb);
  2616. if (local_softirq_pending())
  2617. do_softirq();
  2618. preempt_enable();
  2619. return err;
  2620. }
  2621. EXPORT_SYMBOL(netif_rx_ni);
  2622. static void net_tx_action(struct softirq_action *h)
  2623. {
  2624. struct softnet_data *sd = &__get_cpu_var(softnet_data);
  2625. if (sd->completion_queue) {
  2626. struct sk_buff *clist;
  2627. local_irq_disable();
  2628. clist = sd->completion_queue;
  2629. sd->completion_queue = NULL;
  2630. local_irq_enable();
  2631. while (clist) {
  2632. struct sk_buff *skb = clist;
  2633. clist = clist->next;
  2634. WARN_ON(atomic_read(&skb->users));
  2635. trace_kfree_skb(skb, net_tx_action);
  2636. __kfree_skb(skb);
  2637. }
  2638. }
  2639. if (sd->output_queue) {
  2640. struct Qdisc *head;
  2641. local_irq_disable();
  2642. head = sd->output_queue;
  2643. sd->output_queue = NULL;
  2644. sd->output_queue_tailp = &sd->output_queue;
  2645. local_irq_enable();
  2646. while (head) {
  2647. struct Qdisc *q = head;
  2648. spinlock_t *root_lock;
  2649. head = head->next_sched;
  2650. root_lock = qdisc_lock(q);
  2651. if (spin_trylock(root_lock)) {
  2652. smp_mb__before_clear_bit();
  2653. clear_bit(__QDISC_STATE_SCHED,
  2654. &q->state);
  2655. qdisc_run(q);
  2656. spin_unlock(root_lock);
  2657. } else {
  2658. if (!test_bit(__QDISC_STATE_DEACTIVATED,
  2659. &q->state)) {
  2660. __netif_reschedule(q);
  2661. } else {
  2662. smp_mb__before_clear_bit();
  2663. clear_bit(__QDISC_STATE_SCHED,
  2664. &q->state);
  2665. }
  2666. }
  2667. }
  2668. }
  2669. }
  2670. #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
  2671. (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
  2672. /* This hook is defined here for ATM LANE */
  2673. int (*br_fdb_test_addr_hook)(struct net_device *dev,
  2674. unsigned char *addr) __read_mostly;
  2675. EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  2676. #endif
  2677. #ifdef CONFIG_NET_CLS_ACT
2678. /* TODO: Maybe we should just force sch_ingress to be compiled in
2679. * when CONFIG_NET_CLS_ACT is? Otherwise we currently pay for some
2680. * useless instructions (a compare and two extra stores) when the
2681. * ingress scheduler is not built but CONFIG_NET_CLS_ACT is enabled.
2682. * NOTE: This doesn't stop any functionality; if you don't have
2683. * the ingress scheduler, you just can't add policies on ingress.
  2684. *
  2685. */
  2686. static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
  2687. {
  2688. struct net_device *dev = skb->dev;
  2689. u32 ttl = G_TC_RTTL(skb->tc_verd);
  2690. int result = TC_ACT_OK;
  2691. struct Qdisc *q;
  2692. if (unlikely(MAX_RED_LOOP < ttl++)) {
  2693. if (net_ratelimit())
2694. pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
  2695. skb->skb_iif, dev->ifindex);
  2696. return TC_ACT_SHOT;
  2697. }
  2698. skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
  2699. skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
  2700. q = rxq->qdisc;
  2701. if (q != &noop_qdisc) {
  2702. spin_lock(qdisc_lock(q));
  2703. if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
  2704. result = qdisc_enqueue_root(skb, q);
  2705. spin_unlock(qdisc_lock(q));
  2706. }
  2707. return result;
  2708. }
  2709. static inline struct sk_buff *handle_ing(struct sk_buff *skb,
  2710. struct packet_type **pt_prev,
  2711. int *ret, struct net_device *orig_dev)
  2712. {
  2713. struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
  2714. if (!rxq || rxq->qdisc == &noop_qdisc)
  2715. goto out;
  2716. if (*pt_prev) {
  2717. *ret = deliver_skb(skb, *pt_prev, orig_dev);
  2718. *pt_prev = NULL;
  2719. }
  2720. switch (ing_filter(skb, rxq)) {
  2721. case TC_ACT_SHOT:
  2722. case TC_ACT_STOLEN:
  2723. kfree_skb(skb);
  2724. return NULL;
  2725. }
  2726. out:
  2727. skb->tc_verd = 0;
  2728. return skb;
  2729. }
  2730. #endif
  2731. /**
  2732. * netdev_rx_handler_register - register receive handler
  2733. * @dev: device to register a handler for
  2734. * @rx_handler: receive handler to register
  2735. * @rx_handler_data: data pointer that is used by rx handler
  2736. *
2737. * Register a receive handler for a device. This handler will then be
  2738. * called from __netif_receive_skb. A negative errno code is returned
  2739. * on a failure.
  2740. *
  2741. * The caller must hold the rtnl_mutex.
  2742. *
  2743. * For a general description of rx_handler, see enum rx_handler_result.
  2744. */
  2745. int netdev_rx_handler_register(struct net_device *dev,
  2746. rx_handler_func_t *rx_handler,
  2747. void *rx_handler_data)
  2748. {
  2749. ASSERT_RTNL();
  2750. if (dev->rx_handler)
  2751. return -EBUSY;
  2752. rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
  2753. rcu_assign_pointer(dev->rx_handler, rx_handler);
  2754. return 0;
  2755. }
  2756. EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
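/*
 * Editor's note -- illustrative sketch only, not part of dev.c. A stacked
 * device (in the style of bridge, bonding, or macvlan) claims a lower
 * device's traffic by registering an rx_handler under RTNL; the handler
 * either lets the skb continue or redirects it to the upper device and
 * asks __netif_receive_skb to run another round. All names below are
 * hypothetical.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *upper = rcu_dereference(skb->dev->rx_handler_data);

        if (!upper)
                return RX_HANDLER_PASS;

        /* hand the packet to the upper device and re-run the receive loop */
        skb->dev = upper;
        return RX_HANDLER_ANOTHER;
}

static int example_enslave(struct net_device *upper, struct net_device *lower)
{
        ASSERT_RTNL();
        return netdev_rx_handler_register(lower, example_rx_handler, upper);
}

static void example_release(struct net_device *lower)
{
        ASSERT_RTNL();
        netdev_rx_handler_unregister(lower);
}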
  2757. /**
  2758. * netdev_rx_handler_unregister - unregister receive handler
  2759. * @dev: device to unregister a handler from
  2760. *
2761. * Unregister a receive handler from a device.
  2762. *
  2763. * The caller must hold the rtnl_mutex.
  2764. */
  2765. void netdev_rx_handler_unregister(struct net_device *dev)
  2766. {
  2767. ASSERT_RTNL();
  2768. RCU_INIT_POINTER(dev->rx_handler, NULL);
  2769. RCU_INIT_POINTER(dev->rx_handler_data, NULL);
  2770. }
  2771. EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
  2772. static int __netif_receive_skb(struct sk_buff *skb)
  2773. {
  2774. struct packet_type *ptype, *pt_prev;
  2775. rx_handler_func_t *rx_handler;
  2776. struct net_device *orig_dev;
  2777. struct net_device *null_or_dev;
  2778. bool deliver_exact = false;
  2779. int ret = NET_RX_DROP;
  2780. __be16 type;
  2781. if (!netdev_tstamp_prequeue)
  2782. net_timestamp_check(skb);
  2783. trace_netif_receive_skb(skb);
  2784. /* if we've gotten here through NAPI, check netpoll */
  2785. if (netpoll_receive_skb(skb))
  2786. return NET_RX_DROP;
  2787. if (!skb->skb_iif)
  2788. skb->skb_iif = skb->dev->ifindex;
  2789. orig_dev = skb->dev;
  2790. skb_reset_network_header(skb);
  2791. skb_reset_transport_header(skb);
  2792. skb_reset_mac_len(skb);
  2793. pt_prev = NULL;
  2794. rcu_read_lock();
  2795. another_round:
  2796. __this_cpu_inc(softnet_data.processed);
  2797. if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
  2798. skb = vlan_untag(skb);
  2799. if (unlikely(!skb))
  2800. goto out;
  2801. }
  2802. #ifdef CONFIG_NET_CLS_ACT
  2803. if (skb->tc_verd & TC_NCLS) {
  2804. skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
  2805. goto ncls;
  2806. }
  2807. #endif
  2808. list_for_each_entry_rcu(ptype, &ptype_all, list) {
  2809. if (!ptype->dev || ptype->dev == skb->dev) {
  2810. if (pt_prev)
  2811. ret = deliver_skb(skb, pt_prev, orig_dev);
  2812. pt_prev = ptype;
  2813. }
  2814. }
  2815. #ifdef CONFIG_NET_CLS_ACT
  2816. skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
  2817. if (!skb)
  2818. goto out;
  2819. ncls:
  2820. #endif
  2821. rx_handler = rcu_dereference(skb->dev->rx_handler);
  2822. if (vlan_tx_tag_present(skb)) {
  2823. if (pt_prev) {
  2824. ret = deliver_skb(skb, pt_prev, orig_dev);
  2825. pt_prev = NULL;
  2826. }
  2827. if (vlan_do_receive(&skb, !rx_handler))
  2828. goto another_round;
  2829. else if (unlikely(!skb))
  2830. goto out;
  2831. }
  2832. if (rx_handler) {
  2833. if (pt_prev) {
  2834. ret = deliver_skb(skb, pt_prev, orig_dev);
  2835. pt_prev = NULL;
  2836. }
  2837. switch (rx_handler(&skb)) {
  2838. case RX_HANDLER_CONSUMED:
  2839. goto out;
  2840. case RX_HANDLER_ANOTHER:
  2841. goto another_round;
  2842. case RX_HANDLER_EXACT:
  2843. deliver_exact = true;
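/* fall through */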
  2844. case RX_HANDLER_PASS:
  2845. break;
  2846. default:
  2847. BUG();
  2848. }
  2849. }
  2850. /* deliver only exact match when indicated */
  2851. null_or_dev = deliver_exact ? skb->dev : NULL;
  2852. type = skb->protocol;
  2853. list_for_each_entry_rcu(ptype,
  2854. &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
  2855. if (ptype->type == type &&
  2856. (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
  2857. ptype->dev == orig_dev)) {
  2858. if (pt_prev)
  2859. ret = deliver_skb(skb, pt_prev, orig_dev);
  2860. pt_prev = ptype;
  2861. }
  2862. }
  2863. if (pt_prev) {
  2864. ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  2865. } else {
  2866. atomic_long_inc(&skb->dev->rx_dropped);
  2867. kfree_skb(skb);
2868. /* Jamal, now you will not be able to escape explaining
2869. * to me how you were going to use this. :-)
  2870. */
  2871. ret = NET_RX_DROP;
  2872. }
  2873. out:
  2874. rcu_read_unlock();
  2875. return ret;
  2876. }
  2877. /**
  2878. * netif_receive_skb - process receive buffer from network
  2879. * @skb: buffer to process
  2880. *
  2881. * netif_receive_skb() is the main receive data processing function.
  2882. * It always succeeds. The buffer may be dropped during processing
  2883. * for congestion control or by the protocol layers.
  2884. *
  2885. * This function may only be called from softirq context and interrupts
  2886. * should be enabled.
  2887. *
  2888. * Return values (usually ignored):
  2889. * NET_RX_SUCCESS: no congestion
  2890. * NET_RX_DROP: packet was dropped
  2891. */
  2892. int netif_receive_skb(struct sk_buff *skb)
  2893. {
  2894. if (netdev_tstamp_prequeue)
  2895. net_timestamp_check(skb);
  2896. if (skb_defer_rx_timestamp(skb))
  2897. return NET_RX_SUCCESS;
  2898. #ifdef CONFIG_RPS
  2899. {
  2900. struct rps_dev_flow voidflow, *rflow = &voidflow;
  2901. int cpu, ret;
  2902. rcu_read_lock();
  2903. cpu = get_rps_cpu(skb->dev, skb, &rflow);
  2904. if (cpu >= 0) {
  2905. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  2906. rcu_read_unlock();
  2907. } else {
  2908. rcu_read_unlock();
  2909. ret = __netif_receive_skb(skb);
  2910. }
  2911. return ret;
  2912. }
  2913. #else
  2914. return __netif_receive_skb(skb);
  2915. #endif
  2916. }
  2917. EXPORT_SYMBOL(netif_receive_skb);
  2918. /* Network device is going away, flush any packets still pending
  2919. * Called with irqs disabled.
  2920. */
  2921. static void flush_backlog(void *arg)
  2922. {
  2923. struct net_device *dev = arg;
  2924. struct softnet_data *sd = &__get_cpu_var(softnet_data);
  2925. struct sk_buff *skb, *tmp;
  2926. rps_lock(sd);
  2927. skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  2928. if (skb->dev == dev) {
  2929. __skb_unlink(skb, &sd->input_pkt_queue);
  2930. kfree_skb(skb);
  2931. input_queue_head_incr(sd);
  2932. }
  2933. }
  2934. rps_unlock(sd);
  2935. skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  2936. if (skb->dev == dev) {
  2937. __skb_unlink(skb, &sd->process_queue);
  2938. kfree_skb(skb);
  2939. input_queue_head_incr(sd);
  2940. }
  2941. }
  2942. }
  2943. static int napi_gro_complete(struct sk_buff *skb)
  2944. {
  2945. struct packet_type *ptype;
  2946. __be16 type = skb->protocol;
  2947. struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
  2948. int err = -ENOENT;
  2949. if (NAPI_GRO_CB(skb)->count == 1) {
  2950. skb_shinfo(skb)->gso_size = 0;
  2951. goto out;
  2952. }
  2953. rcu_read_lock();
  2954. list_for_each_entry_rcu(ptype, head, list) {
  2955. if (ptype->type != type || ptype->dev || !ptype->gro_complete)
  2956. continue;
  2957. err = ptype->gro_complete(skb);
  2958. break;
  2959. }
  2960. rcu_read_unlock();
  2961. if (err) {
  2962. WARN_ON(&ptype->list == head);
  2963. kfree_skb(skb);
  2964. return NET_RX_SUCCESS;
  2965. }
  2966. out:
  2967. return netif_receive_skb(skb);
  2968. }
  2969. inline void napi_gro_flush(struct napi_struct *napi)
  2970. {
  2971. struct sk_buff *skb, *next;
  2972. for (skb = napi->gro_list; skb; skb = next) {
  2973. next = skb->next;
  2974. skb->next = NULL;
  2975. napi_gro_complete(skb);
  2976. }
  2977. napi->gro_count = 0;
  2978. napi->gro_list = NULL;
  2979. }
  2980. EXPORT_SYMBOL(napi_gro_flush);
  2981. enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  2982. {
  2983. struct sk_buff **pp = NULL;
  2984. struct packet_type *ptype;
  2985. __be16 type = skb->protocol;
  2986. struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
  2987. int same_flow;
  2988. int mac_len;
  2989. enum gro_result ret;
  2990. if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
  2991. goto normal;
  2992. if (skb_is_gso(skb) || skb_has_frag_list(skb))
  2993. goto normal;
  2994. rcu_read_lock();
  2995. list_for_each_entry_rcu(ptype, head, list) {
  2996. if (ptype->type != type || ptype->dev || !ptype->gro_receive)
  2997. continue;
  2998. skb_set_network_header(skb, skb_gro_offset(skb));
  2999. mac_len = skb->network_header - skb->mac_header;
  3000. skb->mac_len = mac_len;
  3001. NAPI_GRO_CB(skb)->same_flow = 0;
  3002. NAPI_GRO_CB(skb)->flush = 0;
  3003. NAPI_GRO_CB(skb)->free = 0;
  3004. pp = ptype->gro_receive(&napi->gro_list, skb);
  3005. break;
  3006. }
  3007. rcu_read_unlock();
  3008. if (&ptype->list == head)
  3009. goto normal;
  3010. same_flow = NAPI_GRO_CB(skb)->same_flow;
  3011. ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
  3012. if (pp) {
  3013. struct sk_buff *nskb = *pp;
  3014. *pp = nskb->next;
  3015. nskb->next = NULL;
  3016. napi_gro_complete(nskb);
  3017. napi->gro_count--;
  3018. }
  3019. if (same_flow)
  3020. goto ok;
  3021. if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
  3022. goto normal;
  3023. napi->gro_count++;
  3024. NAPI_GRO_CB(skb)->count = 1;
  3025. skb_shinfo(skb)->gso_size = skb_gro_len(skb);
  3026. skb->next = napi->gro_list;
  3027. napi->gro_list = skb;
  3028. ret = GRO_HELD;
  3029. pull:
  3030. if (skb_headlen(skb) < skb_gro_offset(skb)) {
  3031. int grow = skb_gro_offset(skb) - skb_headlen(skb);
  3032. BUG_ON(skb->end - skb->tail < grow);
  3033. memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
  3034. skb->tail += grow;
  3035. skb->data_len -= grow;
  3036. skb_shinfo(skb)->frags[0].page_offset += grow;
  3037. skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
  3038. if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
  3039. skb_frag_unref(skb, 0);
  3040. memmove(skb_shinfo(skb)->frags,
  3041. skb_shinfo(skb)->frags + 1,
  3042. --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
  3043. }
  3044. }
  3045. ok:
  3046. return ret;
  3047. normal:
  3048. ret = GRO_NORMAL;
  3049. goto pull;
  3050. }
  3051. EXPORT_SYMBOL(dev_gro_receive);
  3052. static inline gro_result_t
  3053. __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  3054. {
  3055. struct sk_buff *p;
  3056. for (p = napi->gro_list; p; p = p->next) {
  3057. unsigned long diffs;
  3058. diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
  3059. diffs |= p->vlan_tci ^ skb->vlan_tci;
  3060. diffs |= compare_ether_header(skb_mac_header(p),
  3061. skb_gro_mac_header(skb));
  3062. NAPI_GRO_CB(p)->same_flow = !diffs;
  3063. NAPI_GRO_CB(p)->flush = 0;
  3064. }
  3065. return dev_gro_receive(napi, skb);
  3066. }
  3067. gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
  3068. {
  3069. switch (ret) {
  3070. case GRO_NORMAL:
  3071. if (netif_receive_skb(skb))
  3072. ret = GRO_DROP;
  3073. break;
  3074. case GRO_DROP:
  3075. case GRO_MERGED_FREE:
  3076. kfree_skb(skb);
  3077. break;
  3078. case GRO_HELD:
  3079. case GRO_MERGED:
  3080. break;
  3081. }
  3082. return ret;
  3083. }
  3084. EXPORT_SYMBOL(napi_skb_finish);
  3085. void skb_gro_reset_offset(struct sk_buff *skb)
  3086. {
  3087. NAPI_GRO_CB(skb)->data_offset = 0;
  3088. NAPI_GRO_CB(skb)->frag0 = NULL;
  3089. NAPI_GRO_CB(skb)->frag0_len = 0;
  3090. if (skb->mac_header == skb->tail &&
  3091. !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
  3092. NAPI_GRO_CB(skb)->frag0 =
  3093. skb_frag_address(&skb_shinfo(skb)->frags[0]);
  3094. NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
  3095. }
  3096. }
  3097. EXPORT_SYMBOL(skb_gro_reset_offset);
  3098. gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  3099. {
  3100. skb_gro_reset_offset(skb);
  3101. return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
  3102. }
  3103. EXPORT_SYMBOL(napi_gro_receive);
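/*
 * Editor's note -- illustrative sketch only, not part of dev.c. A typical
 * NAPI driver feeds completed frames through napi_gro_receive() from its
 * poll routine and calls napi_complete() once the ring is drained. The
 * struct example_ring and example_ring_next_skb() helper are hypothetical;
 * a real driver would walk its hardware descriptor ring here.
 */
struct example_ring {
        struct napi_struct napi;
        /* ... hardware descriptor ring state would live here ... */
};

/* Hypothetical helper: pop the next completed frame off the RX ring. */
static struct sk_buff *example_ring_next_skb(struct example_ring *ring)
{
        /* A real driver reads its descriptor ring; this stub has nothing. */
        return NULL;
}

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        struct example_ring *ring = container_of(napi, struct example_ring, napi);
        struct sk_buff *skb;
        int work_done = 0;

        while (work_done < budget && (skb = example_ring_next_skb(ring))) {
                skb->protocol = eth_type_trans(skb, napi->dev);
                napi_gro_receive(napi, skb);
                work_done++;
        }

        if (work_done < budget) {
                napi_complete(napi);
                /* re-enable the device's RX interrupt here */
        }
        return work_done;
}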
  3104. static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
  3105. {
  3106. __skb_pull(skb, skb_headlen(skb));
  3107. skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
  3108. skb->vlan_tci = 0;
  3109. skb->dev = napi->dev;
  3110. skb->skb_iif = 0;
  3111. napi->skb = skb;
  3112. }
  3113. struct sk_buff *napi_get_frags(struct napi_struct *napi)
  3114. {
  3115. struct sk_buff *skb = napi->skb;
  3116. if (!skb) {
  3117. skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
  3118. if (skb)
  3119. napi->skb = skb;
  3120. }
  3121. return skb;
  3122. }
  3123. EXPORT_SYMBOL(napi_get_frags);
  3124. gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
  3125. gro_result_t ret)
  3126. {
  3127. switch (ret) {
  3128. case GRO_NORMAL:
  3129. case GRO_HELD:
  3130. skb->protocol = eth_type_trans(skb, skb->dev);
  3131. if (ret == GRO_HELD)
  3132. skb_gro_pull(skb, -ETH_HLEN);
  3133. else if (netif_receive_skb(skb))
  3134. ret = GRO_DROP;
  3135. break;
  3136. case GRO_DROP:
  3137. case GRO_MERGED_FREE:
  3138. napi_reuse_skb(napi, skb);
  3139. break;
  3140. case GRO_MERGED:
  3141. break;
  3142. }
  3143. return ret;
  3144. }
  3145. EXPORT_SYMBOL(napi_frags_finish);
  3146. struct sk_buff *napi_frags_skb(struct napi_struct *napi)
  3147. {
  3148. struct sk_buff *skb = napi->skb;
  3149. struct ethhdr *eth;
  3150. unsigned int hlen;
  3151. unsigned int off;
  3152. napi->skb = NULL;
  3153. skb_reset_mac_header(skb);
  3154. skb_gro_reset_offset(skb);
  3155. off = skb_gro_offset(skb);
  3156. hlen = off + sizeof(*eth);
  3157. eth = skb_gro_header_fast(skb, off);
  3158. if (skb_gro_header_hard(skb, hlen)) {
  3159. eth = skb_gro_header_slow(skb, hlen, off);
  3160. if (unlikely(!eth)) {
  3161. napi_reuse_skb(napi, skb);
  3162. skb = NULL;
  3163. goto out;
  3164. }
  3165. }
  3166. skb_gro_pull(skb, sizeof(*eth));
  3167. /*
  3168. * This works because the only protocols we care about don't require
  3169. * special handling. We'll fix it up properly at the end.
  3170. */
  3171. skb->protocol = eth->h_proto;
  3172. out:
  3173. return skb;
  3174. }
  3175. EXPORT_SYMBOL(napi_frags_skb);
  3176. gro_result_t napi_gro_frags(struct napi_struct *napi)
  3177. {
  3178. struct sk_buff *skb = napi_frags_skb(napi);
  3179. if (!skb)
  3180. return GRO_DROP;
  3181. return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
  3182. }
  3183. EXPORT_SYMBOL(napi_gro_frags);
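/*
 * Editor's note -- illustrative sketch only, not part of dev.c. A driver
 * that receives directly into pages can skip per-frame skb allocation:
 * it borrows napi->skb via napi_get_frags(), attaches the page as a
 * fragment, and hands it back through napi_gro_frags(), which parses the
 * Ethernet header itself. The page/len parameters are assumed to describe
 * one complete received frame.
 */
static void example_rx_page(struct napi_struct *napi, struct page *page,
                            unsigned int len)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb)) {
                put_page(page);
                return;
        }
        skb_fill_page_desc(skb, 0, page, 0, len);
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;
        napi_gro_frags(napi);
}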
  3184. /*
3185. * net_rps_action sends any pending IPIs for RPS.
  3186. * Note: called with local irq disabled, but exits with local irq enabled.
  3187. */
  3188. static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  3189. {
  3190. #ifdef CONFIG_RPS
  3191. struct softnet_data *remsd = sd->rps_ipi_list;
  3192. if (remsd) {
  3193. sd->rps_ipi_list = NULL;
  3194. local_irq_enable();
3195. /* Send pending IPIs to kick RPS processing on remote CPUs. */
  3196. while (remsd) {
  3197. struct softnet_data *next = remsd->rps_ipi_next;
  3198. if (cpu_online(remsd->cpu))
  3199. __smp_call_function_single(remsd->cpu,
  3200. &remsd->csd, 0);
  3201. remsd = next;
  3202. }
  3203. } else
  3204. #endif
  3205. local_irq_enable();
  3206. }
  3207. static int process_backlog(struct napi_struct *napi, int quota)
  3208. {
  3209. int work = 0;
  3210. struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
  3211. #ifdef CONFIG_RPS
3212. /* Check if we have pending IPIs; it's better to send them now
3213. * rather than waiting for net_rx_action() to end.
  3214. */
  3215. if (sd->rps_ipi_list) {
  3216. local_irq_disable();
  3217. net_rps_action_and_irq_enable(sd);
  3218. }
  3219. #endif
  3220. napi->weight = weight_p;
  3221. local_irq_disable();
  3222. while (work < quota) {
  3223. struct sk_buff *skb;
  3224. unsigned int qlen;
  3225. while ((skb = __skb_dequeue(&sd->process_queue))) {
  3226. local_irq_enable();
  3227. __netif_receive_skb(skb);
  3228. local_irq_disable();
  3229. input_queue_head_incr(sd);
  3230. if (++work >= quota) {
  3231. local_irq_enable();
  3232. return work;
  3233. }
  3234. }
  3235. rps_lock(sd);
  3236. qlen = skb_queue_len(&sd->input_pkt_queue);
  3237. if (qlen)
  3238. skb_queue_splice_tail_init(&sd->input_pkt_queue,
  3239. &sd->process_queue);
  3240. if (qlen < quota - work) {
  3241. /*
  3242. * Inline a custom version of __napi_complete().
3243. * Only the current CPU owns and manipulates this napi,
3244. * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3245. * so we can use a plain write instead of clear_bit(),
3246. * and we don't need an smp_mb() memory barrier.
  3247. */
  3248. list_del(&napi->poll_list);
  3249. napi->state = 0;
  3250. quota = work + qlen;
  3251. }
  3252. rps_unlock(sd);
  3253. }
  3254. local_irq_enable();
  3255. return work;
  3256. }
  3257. /**
  3258. * __napi_schedule - schedule for receive
  3259. * @n: entry to schedule
  3260. *
  3261. * The entry's receive function will be scheduled to run
  3262. */
  3263. void __napi_schedule(struct napi_struct *n)
  3264. {
  3265. unsigned long flags;
  3266. local_irq_save(flags);
  3267. ____napi_schedule(&__get_cpu_var(softnet_data), n);
  3268. local_irq_restore(flags);
  3269. }
  3270. EXPORT_SYMBOL(__napi_schedule);
  3271. void __napi_complete(struct napi_struct *n)
  3272. {
  3273. BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  3274. BUG_ON(n->gro_list);
  3275. list_del(&n->poll_list);
  3276. smp_mb__before_clear_bit();
  3277. clear_bit(NAPI_STATE_SCHED, &n->state);
  3278. }
  3279. EXPORT_SYMBOL(__napi_complete);
  3280. void napi_complete(struct napi_struct *n)
  3281. {
  3282. unsigned long flags;
  3283. /*
3284. * Don't let napi dequeue from the CPU poll list,
3285. * just in case it's running on a different CPU.
  3286. */
  3287. if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
  3288. return;
  3289. napi_gro_flush(n);
  3290. local_irq_save(flags);
  3291. __napi_complete(n);
  3292. local_irq_restore(flags);
  3293. }
  3294. EXPORT_SYMBOL(napi_complete);
  3295. void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
  3296. int (*poll)(struct napi_struct *, int), int weight)
  3297. {
  3298. INIT_LIST_HEAD(&napi->poll_list);
  3299. napi->gro_count = 0;
  3300. napi->gro_list = NULL;
  3301. napi->skb = NULL;
  3302. napi->poll = poll;
  3303. napi->weight = weight;
  3304. list_add(&napi->dev_list, &dev->napi_list);
  3305. napi->dev = dev;
  3306. #ifdef CONFIG_NETPOLL
  3307. spin_lock_init(&napi->poll_lock);
  3308. napi->poll_owner = -1;
  3309. #endif
  3310. set_bit(NAPI_STATE_SCHED, &napi->state);
  3311. }
  3312. EXPORT_SYMBOL(netif_napi_add);
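/*
 * Editor's note -- illustrative sketch only, not part of dev.c, continuing
 * the hypothetical example_ring driver sketched earlier: the poll routine
 * is registered with netif_napi_add() at setup time (64 is a common
 * weight), and the RX interrupt handler does nothing but schedule NAPI.
 */
static void example_setup_napi(struct net_device *dev, struct example_ring *ring)
{
        netif_napi_add(dev, &ring->napi, example_napi_poll, 64);
        napi_enable(&ring->napi);
}

static irqreturn_t example_rx_irq(int irq, void *data)
{
        struct example_ring *ring = data;

        /* mask further RX interrupts in hardware here, then ... */
        if (napi_schedule_prep(&ring->napi))
                __napi_schedule(&ring->napi);
        return IRQ_HANDLED;
}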
  3313. void netif_napi_del(struct napi_struct *napi)
  3314. {
  3315. struct sk_buff *skb, *next;
  3316. list_del_init(&napi->dev_list);
  3317. napi_free_frags(napi);
  3318. for (skb = napi->gro_list; skb; skb = next) {
  3319. next = skb->next;
  3320. skb->next = NULL;
  3321. kfree_skb(skb);
  3322. }
  3323. napi->gro_list = NULL;
  3324. napi->gro_count = 0;
  3325. }
  3326. EXPORT_SYMBOL(netif_napi_del);
  3327. static void net_rx_action(struct softirq_action *h)
  3328. {
  3329. struct softnet_data *sd = &__get_cpu_var(softnet_data);
  3330. unsigned long time_limit = jiffies + 2;
  3331. int budget = netdev_budget;
  3332. void *have;
  3333. local_irq_disable();
  3334. while (!list_empty(&sd->poll_list)) {
  3335. struct napi_struct *n;
  3336. int work, weight;
3337. /* If the softirq window is exhausted then punt.
3338. * Allow this to run for 2 jiffies, which allows
3339. * an average latency of 1.5/HZ.
  3340. */
  3341. if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
  3342. goto softnet_break;
  3343. local_irq_enable();
  3344. /* Even though interrupts have been re-enabled, this
  3345. * access is safe because interrupts can only add new
  3346. * entries to the tail of this list, and only ->poll()
  3347. * calls can remove this head entry from the list.
  3348. */
  3349. n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
  3350. have = netpoll_poll_lock(n);
  3351. weight = n->weight;
  3352. /* This NAPI_STATE_SCHED test is for avoiding a race
  3353. * with netpoll's poll_napi(). Only the entity which
  3354. * obtains the lock and sees NAPI_STATE_SCHED set will
  3355. * actually make the ->poll() call. Therefore we avoid
  3356. * accidentally calling ->poll() when NAPI is not scheduled.
  3357. */
  3358. work = 0;
  3359. if (test_bit(NAPI_STATE_SCHED, &n->state)) {
  3360. work = n->poll(n, weight);
  3361. trace_napi_poll(n);
  3362. }
  3363. WARN_ON_ONCE(work > weight);
  3364. budget -= work;
  3365. local_irq_disable();
  3366. /* Drivers must not modify the NAPI state if they
  3367. * consume the entire weight. In such cases this code
  3368. * still "owns" the NAPI instance and therefore can
  3369. * move the instance around on the list at-will.
  3370. */
  3371. if (unlikely(work == weight)) {
  3372. if (unlikely(napi_disable_pending(n))) {
  3373. local_irq_enable();
  3374. napi_complete(n);
  3375. local_irq_disable();
  3376. } else
  3377. list_move_tail(&n->poll_list, &sd->poll_list);
  3378. }
  3379. netpoll_poll_unlock(have);
  3380. }
  3381. out:
  3382. net_rps_action_and_irq_enable(sd);
  3383. #ifdef CONFIG_NET_DMA
  3384. /*
  3385. * There may not be any more sk_buffs coming right now, so push
  3386. * any pending DMA copies to hardware
  3387. */
  3388. dma_issue_pending_all();
  3389. #endif
  3390. return;
  3391. softnet_break:
  3392. sd->time_squeeze++;
  3393. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  3394. goto out;
  3395. }
  3396. static gifconf_func_t *gifconf_list[NPROTO];
  3397. /**
  3398. * register_gifconf - register a SIOCGIF handler
  3399. * @family: Address family
  3400. * @gifconf: Function handler
  3401. *
  3402. * Register protocol dependent address dumping routines. The handler
  3403. * that is passed must not be freed or reused until it has been replaced
  3404. * by another handler.
  3405. */
  3406. int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
  3407. {
  3408. if (family >= NPROTO)
  3409. return -EINVAL;
  3410. gifconf_list[family] = gifconf;
  3411. return 0;
  3412. }
  3413. EXPORT_SYMBOL(register_gifconf);
  3414. /*
  3415. * Map an interface index to its name (SIOCGIFNAME)
  3416. */
  3417. /*
  3418. * We need this ioctl for efficient implementation of the
  3419. * if_indextoname() function required by the IPv6 API. Without
  3420. * it, we would have to search all the interfaces to find a
  3421. * match. --pb
  3422. */
  3423. static int dev_ifname(struct net *net, struct ifreq __user *arg)
  3424. {
  3425. struct net_device *dev;
  3426. struct ifreq ifr;
  3427. /*
  3428. * Fetch the caller's info block.
  3429. */
  3430. if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
  3431. return -EFAULT;
  3432. rcu_read_lock();
  3433. dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
  3434. if (!dev) {
  3435. rcu_read_unlock();
  3436. return -ENODEV;
  3437. }
  3438. strcpy(ifr.ifr_name, dev->name);
  3439. rcu_read_unlock();
  3440. if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
  3441. return -EFAULT;
  3442. return 0;
  3443. }
  3444. /*
  3445. * Perform a SIOCGIFCONF call. This structure will change
  3446. * size eventually, and there is nothing I can do about it.
  3447. * Thus we will need a 'compatibility mode'.
  3448. */
  3449. static int dev_ifconf(struct net *net, char __user *arg)
  3450. {
  3451. struct ifconf ifc;
  3452. struct net_device *dev;
  3453. char __user *pos;
  3454. int len;
  3455. int total;
  3456. int i;
  3457. /*
  3458. * Fetch the caller's info block.
  3459. */
  3460. if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
  3461. return -EFAULT;
  3462. pos = ifc.ifc_buf;
  3463. len = ifc.ifc_len;
  3464. /*
  3465. * Loop over the interfaces, and write an info block for each.
  3466. */
  3467. total = 0;
  3468. for_each_netdev(net, dev) {
  3469. for (i = 0; i < NPROTO; i++) {
  3470. if (gifconf_list[i]) {
  3471. int done;
  3472. if (!pos)
  3473. done = gifconf_list[i](dev, NULL, 0);
  3474. else
  3475. done = gifconf_list[i](dev, pos + total,
  3476. len - total);
  3477. if (done < 0)
  3478. return -EFAULT;
  3479. total += done;
  3480. }
  3481. }
  3482. }
  3483. /*
  3484. * All done. Write the updated control block back to the caller.
  3485. */
  3486. ifc.ifc_len = total;
  3487. /*
  3488. * Both BSD and Solaris return 0 here, so we do too.
  3489. */
  3490. return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
  3491. }
  3492. #ifdef CONFIG_PROC_FS
  3493. #define BUCKET_SPACE (32 - NETDEV_HASHBITS)
  3494. struct dev_iter_state {
  3495. struct seq_net_private p;
  3496. unsigned int pos; /* bucket << BUCKET_SPACE + offset */
  3497. };
  3498. #define get_bucket(x) ((x) >> BUCKET_SPACE)
  3499. #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
  3500. #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
  3501. static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
  3502. {
  3503. struct dev_iter_state *state = seq->private;
  3504. struct net *net = seq_file_net(seq);
  3505. struct net_device *dev;
  3506. struct hlist_node *p;
  3507. struct hlist_head *h;
  3508. unsigned int count, bucket, offset;
  3509. bucket = get_bucket(state->pos);
  3510. offset = get_offset(state->pos);
  3511. h = &net->dev_name_head[bucket];
  3512. count = 0;
  3513. hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
  3514. if (count++ == offset) {
  3515. state->pos = set_bucket_offset(bucket, count);
  3516. return dev;
  3517. }
  3518. }
  3519. return NULL;
  3520. }
  3521. static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
  3522. {
  3523. struct dev_iter_state *state = seq->private;
  3524. struct net_device *dev;
  3525. unsigned int bucket;
  3526. bucket = get_bucket(state->pos);
  3527. do {
  3528. dev = dev_from_same_bucket(seq);
  3529. if (dev)
  3530. return dev;
  3531. bucket++;
  3532. state->pos = set_bucket_offset(bucket, 0);
  3533. } while (bucket < NETDEV_HASHENTRIES);
  3534. return NULL;
  3535. }
  3536. /*
  3537. * This is invoked by the /proc filesystem handler to display a device
  3538. * in detail.
  3539. */
  3540. void *dev_seq_start(struct seq_file *seq, loff_t *pos)
  3541. __acquires(RCU)
  3542. {
  3543. struct dev_iter_state *state = seq->private;
  3544. rcu_read_lock();
  3545. if (!*pos)
  3546. return SEQ_START_TOKEN;
  3547. /* check for end of the hash */
  3548. if (state->pos == 0 && *pos > 1)
  3549. return NULL;
  3550. return dev_from_new_bucket(seq);
  3551. }
  3552. void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  3553. {
  3554. struct net_device *dev;
  3555. ++*pos;
  3556. if (v == SEQ_START_TOKEN)
  3557. return dev_from_new_bucket(seq);
  3558. dev = dev_from_same_bucket(seq);
  3559. if (dev)
  3560. return dev;
  3561. return dev_from_new_bucket(seq);
  3562. }
  3563. void dev_seq_stop(struct seq_file *seq, void *v)
  3564. __releases(RCU)
  3565. {
  3566. rcu_read_unlock();
  3567. }
  3568. static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
  3569. {
  3570. struct rtnl_link_stats64 temp;
  3571. const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
  3572. seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
  3573. "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
  3574. dev->name, stats->rx_bytes, stats->rx_packets,
  3575. stats->rx_errors,
  3576. stats->rx_dropped + stats->rx_missed_errors,
  3577. stats->rx_fifo_errors,
  3578. stats->rx_length_errors + stats->rx_over_errors +
  3579. stats->rx_crc_errors + stats->rx_frame_errors,
  3580. stats->rx_compressed, stats->multicast,
  3581. stats->tx_bytes, stats->tx_packets,
  3582. stats->tx_errors, stats->tx_dropped,
  3583. stats->tx_fifo_errors, stats->collisions,
  3584. stats->tx_carrier_errors +
  3585. stats->tx_aborted_errors +
  3586. stats->tx_window_errors +
  3587. stats->tx_heartbeat_errors,
  3588. stats->tx_compressed);
  3589. }
  3590. /*
3591. * Called from the procfs module. This now uses the new arbitrary-sized
3592. * /proc/net interface to create /proc/net/dev.
  3593. */
  3594. static int dev_seq_show(struct seq_file *seq, void *v)
  3595. {
  3596. if (v == SEQ_START_TOKEN)
  3597. seq_puts(seq, "Inter-| Receive "
  3598. " | Transmit\n"
  3599. " face |bytes packets errs drop fifo frame "
  3600. "compressed multicast|bytes packets errs "
  3601. "drop fifo colls carrier compressed\n");
  3602. else
  3603. dev_seq_printf_stats(seq, v);
  3604. return 0;
  3605. }
  3606. static struct softnet_data *softnet_get_online(loff_t *pos)
  3607. {
  3608. struct softnet_data *sd = NULL;
  3609. while (*pos < nr_cpu_ids)
  3610. if (cpu_online(*pos)) {
  3611. sd = &per_cpu(softnet_data, *pos);
  3612. break;
  3613. } else
  3614. ++*pos;
  3615. return sd;
  3616. }
  3617. static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
  3618. {
  3619. return softnet_get_online(pos);
  3620. }
  3621. static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  3622. {
  3623. ++*pos;
  3624. return softnet_get_online(pos);
  3625. }
  3626. static void softnet_seq_stop(struct seq_file *seq, void *v)
  3627. {
  3628. }
  3629. static int softnet_seq_show(struct seq_file *seq, void *v)
  3630. {
  3631. struct softnet_data *sd = v;
  3632. seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
  3633. sd->processed, sd->dropped, sd->time_squeeze, 0,
  3634. 0, 0, 0, 0, /* was fastroute */
  3635. sd->cpu_collision, sd->received_rps);
  3636. return 0;
  3637. }
  3638. static const struct seq_operations dev_seq_ops = {
  3639. .start = dev_seq_start,
  3640. .next = dev_seq_next,
  3641. .stop = dev_seq_stop,
  3642. .show = dev_seq_show,
  3643. };
  3644. static int dev_seq_open(struct inode *inode, struct file *file)
  3645. {
  3646. return seq_open_net(inode, file, &dev_seq_ops,
  3647. sizeof(struct dev_iter_state));
  3648. }
  3649. static const struct file_operations dev_seq_fops = {
  3650. .owner = THIS_MODULE,
  3651. .open = dev_seq_open,
  3652. .read = seq_read,
  3653. .llseek = seq_lseek,
  3654. .release = seq_release_net,
  3655. };
  3656. static const struct seq_operations softnet_seq_ops = {
  3657. .start = softnet_seq_start,
  3658. .next = softnet_seq_next,
  3659. .stop = softnet_seq_stop,
  3660. .show = softnet_seq_show,
  3661. };
  3662. static int softnet_seq_open(struct inode *inode, struct file *file)
  3663. {
  3664. return seq_open(file, &softnet_seq_ops);
  3665. }
  3666. static const struct file_operations softnet_seq_fops = {
  3667. .owner = THIS_MODULE,
  3668. .open = softnet_seq_open,
  3669. .read = seq_read,
  3670. .llseek = seq_lseek,
  3671. .release = seq_release,
  3672. };
  3673. static void *ptype_get_idx(loff_t pos)
  3674. {
  3675. struct packet_type *pt = NULL;
  3676. loff_t i = 0;
  3677. int t;
  3678. list_for_each_entry_rcu(pt, &ptype_all, list) {
  3679. if (i == pos)
  3680. return pt;
  3681. ++i;
  3682. }
  3683. for (t = 0; t < PTYPE_HASH_SIZE; t++) {
  3684. list_for_each_entry_rcu(pt, &ptype_base[t], list) {
  3685. if (i == pos)
  3686. return pt;
  3687. ++i;
  3688. }
  3689. }
  3690. return NULL;
  3691. }
  3692. static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
  3693. __acquires(RCU)
  3694. {
  3695. rcu_read_lock();
  3696. return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
  3697. }
  3698. static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  3699. {
  3700. struct packet_type *pt;
  3701. struct list_head *nxt;
  3702. int hash;
  3703. ++*pos;
  3704. if (v == SEQ_START_TOKEN)
  3705. return ptype_get_idx(0);
  3706. pt = v;
  3707. nxt = pt->list.next;
  3708. if (pt->type == htons(ETH_P_ALL)) {
  3709. if (nxt != &ptype_all)
  3710. goto found;
  3711. hash = 0;
  3712. nxt = ptype_base[0].next;
  3713. } else
  3714. hash = ntohs(pt->type) & PTYPE_HASH_MASK;
  3715. while (nxt == &ptype_base[hash]) {
  3716. if (++hash >= PTYPE_HASH_SIZE)
  3717. return NULL;
  3718. nxt = ptype_base[hash].next;
  3719. }
  3720. found:
  3721. return list_entry(nxt, struct packet_type, list);
  3722. }
  3723. static void ptype_seq_stop(struct seq_file *seq, void *v)
  3724. __releases(RCU)
  3725. {
  3726. rcu_read_unlock();
  3727. }
  3728. static int ptype_seq_show(struct seq_file *seq, void *v)
  3729. {
  3730. struct packet_type *pt = v;
  3731. if (v == SEQ_START_TOKEN)
  3732. seq_puts(seq, "Type Device Function\n");
  3733. else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
  3734. if (pt->type == htons(ETH_P_ALL))
  3735. seq_puts(seq, "ALL ");
  3736. else
  3737. seq_printf(seq, "%04x", ntohs(pt->type));
  3738. seq_printf(seq, " %-8s %pF\n",
  3739. pt->dev ? pt->dev->name : "", pt->func);
  3740. }
  3741. return 0;
  3742. }
  3743. static const struct seq_operations ptype_seq_ops = {
  3744. .start = ptype_seq_start,
  3745. .next = ptype_seq_next,
  3746. .stop = ptype_seq_stop,
  3747. .show = ptype_seq_show,
  3748. };
  3749. static int ptype_seq_open(struct inode *inode, struct file *file)
  3750. {
  3751. return seq_open_net(inode, file, &ptype_seq_ops,
  3752. sizeof(struct seq_net_private));
  3753. }
  3754. static const struct file_operations ptype_seq_fops = {
  3755. .owner = THIS_MODULE,
  3756. .open = ptype_seq_open,
  3757. .read = seq_read,
  3758. .llseek = seq_lseek,
  3759. .release = seq_release_net,
  3760. };
  3761. static int __net_init dev_proc_net_init(struct net *net)
  3762. {
  3763. int rc = -ENOMEM;
  3764. if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
  3765. goto out;
  3766. if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
  3767. goto out_dev;
  3768. if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
  3769. goto out_softnet;
  3770. if (wext_proc_init(net))
  3771. goto out_ptype;
  3772. rc = 0;
  3773. out:
  3774. return rc;
  3775. out_ptype:
  3776. proc_net_remove(net, "ptype");
  3777. out_softnet:
  3778. proc_net_remove(net, "softnet_stat");
  3779. out_dev:
  3780. proc_net_remove(net, "dev");
  3781. goto out;
  3782. }
  3783. static void __net_exit dev_proc_net_exit(struct net *net)
  3784. {
  3785. wext_proc_exit(net);
  3786. proc_net_remove(net, "ptype");
  3787. proc_net_remove(net, "softnet_stat");
  3788. proc_net_remove(net, "dev");
  3789. }
  3790. static struct pernet_operations __net_initdata dev_proc_ops = {
  3791. .init = dev_proc_net_init,
  3792. .exit = dev_proc_net_exit,
  3793. };
  3794. static int __init dev_proc_init(void)
  3795. {
  3796. return register_pernet_subsys(&dev_proc_ops);
  3797. }
  3798. #else
  3799. #define dev_proc_init() 0
  3800. #endif /* CONFIG_PROC_FS */
  3801. /**
  3802. * netdev_set_master - set up master pointer
  3803. * @slave: slave device
  3804. * @master: new master device
  3805. *
  3806. * Changes the master device of the slave. Pass %NULL to break the
  3807. * bonding. The caller must hold the RTNL semaphore. On a failure
  3808. * a negative errno code is returned. On success the reference counts
  3809. * are adjusted and the function returns zero.
  3810. */
  3811. int netdev_set_master(struct net_device *slave, struct net_device *master)
  3812. {
  3813. struct net_device *old = slave->master;
  3814. ASSERT_RTNL();
  3815. if (master) {
  3816. if (old)
  3817. return -EBUSY;
  3818. dev_hold(master);
  3819. }
  3820. slave->master = master;
  3821. if (old)
  3822. dev_put(old);
  3823. return 0;
  3824. }
  3825. EXPORT_SYMBOL(netdev_set_master);
  3826. /**
  3827. * netdev_set_bond_master - set up bonding master/slave pair
  3828. * @slave: slave device
  3829. * @master: new master device
  3830. *
  3831. * Changes the master device of the slave. Pass %NULL to break the
  3832. * bonding. The caller must hold the RTNL semaphore. On a failure
  3833. * a negative errno code is returned. On success %RTM_NEWLINK is sent
  3834. * to the routing socket and the function returns zero.
  3835. */
  3836. int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
  3837. {
  3838. int err;
  3839. ASSERT_RTNL();
  3840. err = netdev_set_master(slave, master);
  3841. if (err)
  3842. return err;
  3843. if (master)
  3844. slave->flags |= IFF_SLAVE;
  3845. else
  3846. slave->flags &= ~IFF_SLAVE;
  3847. rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
  3848. return 0;
  3849. }
  3850. EXPORT_SYMBOL(netdev_set_bond_master);
  3851. static void dev_change_rx_flags(struct net_device *dev, int flags)
  3852. {
  3853. const struct net_device_ops *ops = dev->netdev_ops;
  3854. if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
  3855. ops->ndo_change_rx_flags(dev, flags);
  3856. }
  3857. static int __dev_set_promiscuity(struct net_device *dev, int inc)
  3858. {
  3859. unsigned short old_flags = dev->flags;
  3860. uid_t uid;
  3861. gid_t gid;
  3862. ASSERT_RTNL();
  3863. dev->flags |= IFF_PROMISC;
  3864. dev->promiscuity += inc;
  3865. if (dev->promiscuity == 0) {
  3866. /*
  3867. * Avoid overflow.
3868. * If inc causes overflow, leave promiscuity untouched and return an error.
  3869. */
  3870. if (inc < 0)
  3871. dev->flags &= ~IFF_PROMISC;
  3872. else {
  3873. dev->promiscuity -= inc;
  3874. printk(KERN_WARNING "%s: promiscuity touches roof, "
  3875. "set promiscuity failed, promiscuity feature "
  3876. "of device might be broken.\n", dev->name);
  3877. return -EOVERFLOW;
  3878. }
  3879. }
  3880. if (dev->flags != old_flags) {
  3881. printk(KERN_INFO "device %s %s promiscuous mode\n",
  3882. dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
  3883. "left");
  3884. if (audit_enabled) {
  3885. current_uid_gid(&uid, &gid);
  3886. audit_log(current->audit_context, GFP_ATOMIC,
  3887. AUDIT_ANOM_PROMISCUOUS,
  3888. "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
  3889. dev->name, (dev->flags & IFF_PROMISC),
  3890. (old_flags & IFF_PROMISC),
  3891. audit_get_loginuid(current),
  3892. uid, gid,
  3893. audit_get_sessionid(current));
  3894. }
  3895. dev_change_rx_flags(dev, IFF_PROMISC);
  3896. }
  3897. return 0;
  3898. }
  3899. /**
  3900. * dev_set_promiscuity - update promiscuity count on a device
  3901. * @dev: device
  3902. * @inc: modifier
  3903. *
  3904. * Add or remove promiscuity from a device. While the count in the device
  3905. * remains above zero the interface remains promiscuous. Once it hits zero
  3906. * the device reverts back to normal filtering operation. A negative inc
  3907. * value is used to drop promiscuity on the device.
  3908. * Return 0 if successful or a negative errno code on error.
  3909. */
  3910. int dev_set_promiscuity(struct net_device *dev, int inc)
  3911. {
  3912. unsigned short old_flags = dev->flags;
  3913. int err;
  3914. err = __dev_set_promiscuity(dev, inc);
  3915. if (err < 0)
  3916. return err;
  3917. if (dev->flags != old_flags)
  3918. dev_set_rx_mode(dev);
  3919. return err;
  3920. }
  3921. EXPORT_SYMBOL(dev_set_promiscuity);
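/*
 * Editor's note -- illustrative sketch only, not part of dev.c. A module
 * that needs to see all traffic on an interface (packet-capture style)
 * bumps the promiscuity count while active and drops it again when done;
 * the count is reference-counted, so nested users coexist. RTNL must be
 * held around the calls. The function names are hypothetical.
 */
static int example_start_capture(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_set_promiscuity(dev, 1);
        rtnl_unlock();
        return err;
}

static void example_stop_capture(struct net_device *dev)
{
        rtnl_lock();
        dev_set_promiscuity(dev, -1);
        rtnl_unlock();
}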
  3922. /**
  3923. * dev_set_allmulti - update allmulti count on a device
  3924. * @dev: device
  3925. * @inc: modifier
  3926. *
  3927. * Add or remove reception of all multicast frames to a device. While the
3928. * count in the device remains above zero the interface remains listening
3929. * for all multicast frames. Once it hits zero the device reverts back to normal
  3930. * filtering operation. A negative @inc value is used to drop the counter
  3931. * when releasing a resource needing all multicasts.
  3932. * Return 0 if successful or a negative errno code on error.
  3933. */
  3934. int dev_set_allmulti(struct net_device *dev, int inc)
  3935. {
  3936. unsigned short old_flags = dev->flags;
  3937. ASSERT_RTNL();
  3938. dev->flags |= IFF_ALLMULTI;
  3939. dev->allmulti += inc;
  3940. if (dev->allmulti == 0) {
  3941. /*
  3942. * Avoid overflow.
3943. * If inc causes overflow, leave allmulti untouched and return an error.
  3944. */
  3945. if (inc < 0)
  3946. dev->flags &= ~IFF_ALLMULTI;
  3947. else {
  3948. dev->allmulti -= inc;
  3949. printk(KERN_WARNING "%s: allmulti touches roof, "
  3950. "set allmulti failed, allmulti feature of "
  3951. "device might be broken.\n", dev->name);
  3952. return -EOVERFLOW;
  3953. }
  3954. }
  3955. if (dev->flags ^ old_flags) {
  3956. dev_change_rx_flags(dev, IFF_ALLMULTI);
  3957. dev_set_rx_mode(dev);
  3958. }
  3959. return 0;
  3960. }
  3961. EXPORT_SYMBOL(dev_set_allmulti);
  3962. /*
  3963. * Upload unicast and multicast address lists to device and
  3964. * configure RX filtering. When the device doesn't support unicast
  3965. * filtering it is put in promiscuous mode while unicast addresses
  3966. * are present.
  3967. */
  3968. void __dev_set_rx_mode(struct net_device *dev)
  3969. {
  3970. const struct net_device_ops *ops = dev->netdev_ops;
  3971. /* dev_open will call this function so the list will stay sane. */
  3972. if (!(dev->flags&IFF_UP))
  3973. return;
  3974. if (!netif_device_present(dev))
  3975. return;
  3976. if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
3977. /* Unicast address changes may only happen under the rtnl,
  3978. * therefore calling __dev_set_promiscuity here is safe.
  3979. */
  3980. if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
  3981. __dev_set_promiscuity(dev, 1);
  3982. dev->uc_promisc = true;
  3983. } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
  3984. __dev_set_promiscuity(dev, -1);
  3985. dev->uc_promisc = false;
  3986. }
  3987. }
  3988. if (ops->ndo_set_rx_mode)
  3989. ops->ndo_set_rx_mode(dev);
  3990. }
  3991. void dev_set_rx_mode(struct net_device *dev)
  3992. {
  3993. netif_addr_lock_bh(dev);
  3994. __dev_set_rx_mode(dev);
  3995. netif_addr_unlock_bh(dev);
  3996. }
  3997. /**
  3998. * dev_get_flags - get flags reported to userspace
  3999. * @dev: device
  4000. *
  4001. * Get the combination of flag bits exported through APIs to userspace.
  4002. */
  4003. unsigned dev_get_flags(const struct net_device *dev)
  4004. {
  4005. unsigned flags;
  4006. flags = (dev->flags & ~(IFF_PROMISC |
  4007. IFF_ALLMULTI |
  4008. IFF_RUNNING |
  4009. IFF_LOWER_UP |
  4010. IFF_DORMANT)) |
  4011. (dev->gflags & (IFF_PROMISC |
  4012. IFF_ALLMULTI));
  4013. if (netif_running(dev)) {
  4014. if (netif_oper_up(dev))
  4015. flags |= IFF_RUNNING;
  4016. if (netif_carrier_ok(dev))
  4017. flags |= IFF_LOWER_UP;
  4018. if (netif_dormant(dev))
  4019. flags |= IFF_DORMANT;
  4020. }
  4021. return flags;
  4022. }
  4023. EXPORT_SYMBOL(dev_get_flags);
  4024. int __dev_change_flags(struct net_device *dev, unsigned int flags)
  4025. {
  4026. int old_flags = dev->flags;
  4027. int ret;
  4028. ASSERT_RTNL();
  4029. /*
  4030. * Set the flags on our device.
  4031. */
  4032. dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
  4033. IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
  4034. IFF_AUTOMEDIA)) |
  4035. (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
  4036. IFF_ALLMULTI));
  4037. /*
4038. * Load in the correct multicast list now that the flags have changed.
  4039. */
  4040. if ((old_flags ^ flags) & IFF_MULTICAST)
  4041. dev_change_rx_flags(dev, IFF_MULTICAST);
  4042. dev_set_rx_mode(dev);
  4043. /*
4044. * Have we downed the interface? We handle IFF_UP ourselves
  4045. * according to user attempts to set it, rather than blindly
  4046. * setting it.
  4047. */
  4048. ret = 0;
  4049. if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
  4050. ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
  4051. if (!ret)
  4052. dev_set_rx_mode(dev);
  4053. }
  4054. if ((flags ^ dev->gflags) & IFF_PROMISC) {
  4055. int inc = (flags & IFF_PROMISC) ? 1 : -1;
  4056. dev->gflags ^= IFF_PROMISC;
  4057. dev_set_promiscuity(dev, inc);
  4058. }
  4059. /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4060. is important. Some (broken) drivers set IFF_PROMISC when
4061. IFF_ALLMULTI is requested, without asking us and without reporting it.
  4062. */
  4063. if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
  4064. int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
  4065. dev->gflags ^= IFF_ALLMULTI;
  4066. dev_set_allmulti(dev, inc);
  4067. }
  4068. return ret;
  4069. }
  4070. void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
  4071. {
  4072. unsigned int changes = dev->flags ^ old_flags;
  4073. if (changes & IFF_UP) {
  4074. if (dev->flags & IFF_UP)
  4075. call_netdevice_notifiers(NETDEV_UP, dev);
  4076. else
  4077. call_netdevice_notifiers(NETDEV_DOWN, dev);
  4078. }
  4079. if (dev->flags & IFF_UP &&
  4080. (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
  4081. call_netdevice_notifiers(NETDEV_CHANGE, dev);
  4082. }
  4083. /**
  4084. * dev_change_flags - change device settings
  4085. * @dev: device
  4086. * @flags: device state flags
  4087. *
4088. * Change settings on a device based on the state flags. The flags are
  4089. * in the userspace exported format.
  4090. */
  4091. int dev_change_flags(struct net_device *dev, unsigned flags)
  4092. {
  4093. int ret, changes;
  4094. int old_flags = dev->flags;
  4095. ret = __dev_change_flags(dev, flags);
  4096. if (ret < 0)
  4097. return ret;
  4098. changes = old_flags ^ dev->flags;
  4099. if (changes)
  4100. rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
  4101. __dev_notify_flags(dev, old_flags);
  4102. return ret;
  4103. }
  4104. EXPORT_SYMBOL(dev_change_flags);
  4105. /**
  4106. * dev_set_mtu - Change maximum transfer unit
  4107. * @dev: device
  4108. * @new_mtu: new transfer unit
  4109. *
  4110. * Change the maximum transfer size of the network device.
  4111. */
  4112. int dev_set_mtu(struct net_device *dev, int new_mtu)
  4113. {
  4114. const struct net_device_ops *ops = dev->netdev_ops;
  4115. int err;
  4116. if (new_mtu == dev->mtu)
  4117. return 0;
4118. /* MTU must not be negative. */
  4119. if (new_mtu < 0)
  4120. return -EINVAL;
  4121. if (!netif_device_present(dev))
  4122. return -ENODEV;
  4123. err = 0;
  4124. if (ops->ndo_change_mtu)
  4125. err = ops->ndo_change_mtu(dev, new_mtu);
  4126. else
  4127. dev->mtu = new_mtu;
  4128. if (!err && dev->flags & IFF_UP)
  4129. call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
  4130. return err;
  4131. }
  4132. EXPORT_SYMBOL(dev_set_mtu);
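/*
 * Editor's note -- illustrative sketch only, not part of dev.c. In-kernel
 * callers change the MTU under RTNL, mirroring what the SIOCSIFMTU ioctl
 * path below does. The 9000-byte jumbo value and function name are only
 * examples.
 */
static int example_enable_jumbo_frames(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_set_mtu(dev, 9000);
        rtnl_unlock();
        return err;
}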
  4133. /**
  4134. * dev_set_group - Change group this device belongs to
  4135. * @dev: device
  4136. * @new_group: group this device should belong to
  4137. */
  4138. void dev_set_group(struct net_device *dev, int new_group)
  4139. {
  4140. dev->group = new_group;
  4141. }
  4142. EXPORT_SYMBOL(dev_set_group);
  4143. /**
  4144. * dev_set_mac_address - Change Media Access Control Address
  4145. * @dev: device
  4146. * @sa: new address
  4147. *
  4148. * Change the hardware (MAC) address of the device
  4149. */
  4150. int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
  4151. {
  4152. const struct net_device_ops *ops = dev->netdev_ops;
  4153. int err;
  4154. if (!ops->ndo_set_mac_address)
  4155. return -EOPNOTSUPP;
  4156. if (sa->sa_family != dev->type)
  4157. return -EINVAL;
  4158. if (!netif_device_present(dev))
  4159. return -ENODEV;
  4160. err = ops->ndo_set_mac_address(dev, sa);
  4161. if (!err)
  4162. call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
  4163. return err;
  4164. }
  4165. EXPORT_SYMBOL(dev_set_mac_address);
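/*
 * Illustrative sketch (not part of dev.c): programming a new hardware
 * address for an Ethernet-style device via dev_set_mac_address(). Note
 * that sa_family must match dev->type or the call fails with -EINVAL.
 * The locally administered address below is made up for the example;
 * callers normally hold the RTNL lock, as the SIOCSIFHWADDR path does.
 */
static int example_set_mac(struct net_device *dev)
{
	struct sockaddr sa;
	static const unsigned char new_mac[6] = {
		0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, new_mac, dev->addr_len);	/* addr_len is 6 for Ethernet */
	return dev_set_mac_address(dev, &sa);
}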
  4166. /*
  4167. * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
  4168. */
  4169. static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
  4170. {
  4171. int err;
  4172. struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
  4173. if (!dev)
  4174. return -ENODEV;
  4175. switch (cmd) {
  4176. case SIOCGIFFLAGS: /* Get interface flags */
  4177. ifr->ifr_flags = (short) dev_get_flags(dev);
  4178. return 0;
  4179. case SIOCGIFMETRIC: /* Get the metric on the interface
  4180. (currently unused) */
  4181. ifr->ifr_metric = 0;
  4182. return 0;
  4183. case SIOCGIFMTU: /* Get the MTU of a device */
  4184. ifr->ifr_mtu = dev->mtu;
  4185. return 0;
  4186. case SIOCGIFHWADDR:
  4187. if (!dev->addr_len)
  4188. memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
  4189. else
  4190. memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
  4191. min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
  4192. ifr->ifr_hwaddr.sa_family = dev->type;
  4193. return 0;
  4194. case SIOCGIFSLAVE:
  4195. err = -EINVAL;
  4196. break;
  4197. case SIOCGIFMAP:
  4198. ifr->ifr_map.mem_start = dev->mem_start;
  4199. ifr->ifr_map.mem_end = dev->mem_end;
  4200. ifr->ifr_map.base_addr = dev->base_addr;
  4201. ifr->ifr_map.irq = dev->irq;
  4202. ifr->ifr_map.dma = dev->dma;
  4203. ifr->ifr_map.port = dev->if_port;
  4204. return 0;
  4205. case SIOCGIFINDEX:
  4206. ifr->ifr_ifindex = dev->ifindex;
  4207. return 0;
  4208. case SIOCGIFTXQLEN:
  4209. ifr->ifr_qlen = dev->tx_queue_len;
  4210. return 0;
  4211. default:
  4212. /* dev_ioctl() should ensure this case
  4213. * is never reached
  4214. */
  4215. WARN_ON(1);
  4216. err = -ENOTTY;
  4217. break;
  4218. }
  4219. return err;
  4220. }
  4221. /*
  4222. * Perform the SIOCxIFxxx calls, inside rtnl_lock()
  4223. */
  4224. static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
  4225. {
  4226. int err;
  4227. struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
  4228. const struct net_device_ops *ops;
  4229. if (!dev)
  4230. return -ENODEV;
  4231. ops = dev->netdev_ops;
  4232. switch (cmd) {
  4233. case SIOCSIFFLAGS: /* Set interface flags */
  4234. return dev_change_flags(dev, ifr->ifr_flags);
  4235. case SIOCSIFMETRIC: /* Set the metric on the interface
  4236. (currently unused) */
  4237. return -EOPNOTSUPP;
  4238. case SIOCSIFMTU: /* Set the MTU of a device */
  4239. return dev_set_mtu(dev, ifr->ifr_mtu);
  4240. case SIOCSIFHWADDR:
  4241. return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
  4242. case SIOCSIFHWBROADCAST:
  4243. if (ifr->ifr_hwaddr.sa_family != dev->type)
  4244. return -EINVAL;
  4245. memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
  4246. min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
  4247. call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
  4248. return 0;
  4249. case SIOCSIFMAP:
  4250. if (ops->ndo_set_config) {
  4251. if (!netif_device_present(dev))
  4252. return -ENODEV;
  4253. return ops->ndo_set_config(dev, &ifr->ifr_map);
  4254. }
  4255. return -EOPNOTSUPP;
  4256. case SIOCADDMULTI:
  4257. if (!ops->ndo_set_rx_mode ||
  4258. ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
  4259. return -EINVAL;
  4260. if (!netif_device_present(dev))
  4261. return -ENODEV;
  4262. return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
  4263. case SIOCDELMULTI:
  4264. if (!ops->ndo_set_rx_mode ||
  4265. ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
  4266. return -EINVAL;
  4267. if (!netif_device_present(dev))
  4268. return -ENODEV;
  4269. return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
  4270. case SIOCSIFTXQLEN:
  4271. if (ifr->ifr_qlen < 0)
  4272. return -EINVAL;
  4273. dev->tx_queue_len = ifr->ifr_qlen;
  4274. return 0;
  4275. case SIOCSIFNAME:
  4276. ifr->ifr_newname[IFNAMSIZ-1] = '\0';
  4277. return dev_change_name(dev, ifr->ifr_newname);
  4278. case SIOCSHWTSTAMP:
  4279. err = net_hwtstamp_validate(ifr);
  4280. if (err)
  4281. return err;
  4282. /* fall through */
  4283. /*
  4284. * Unknown or private ioctl
  4285. */
  4286. default:
  4287. if ((cmd >= SIOCDEVPRIVATE &&
  4288. cmd <= SIOCDEVPRIVATE + 15) ||
  4289. cmd == SIOCBONDENSLAVE ||
  4290. cmd == SIOCBONDRELEASE ||
  4291. cmd == SIOCBONDSETHWADDR ||
  4292. cmd == SIOCBONDSLAVEINFOQUERY ||
  4293. cmd == SIOCBONDINFOQUERY ||
  4294. cmd == SIOCBONDCHANGEACTIVE ||
  4295. cmd == SIOCGMIIPHY ||
  4296. cmd == SIOCGMIIREG ||
  4297. cmd == SIOCSMIIREG ||
  4298. cmd == SIOCBRADDIF ||
  4299. cmd == SIOCBRDELIF ||
  4300. cmd == SIOCSHWTSTAMP ||
  4301. cmd == SIOCWANDEV) {
  4302. err = -EOPNOTSUPP;
  4303. if (ops->ndo_do_ioctl) {
  4304. if (netif_device_present(dev))
  4305. err = ops->ndo_do_ioctl(dev, ifr, cmd);
  4306. else
  4307. err = -ENODEV;
  4308. }
  4309. } else
  4310. err = -EINVAL;
  4311. }
  4312. return err;
  4313. }
  4314. /*
  4315. * This function handles all "interface"-type I/O control requests. The actual
  4316. * 'doing' part of this is dev_ifsioc above.
  4317. */
  4318. /**
  4319. * dev_ioctl - network device ioctl
  4320. * @net: the applicable net namespace
  4321. * @cmd: command to issue
  4322. * @arg: pointer to a struct ifreq in user space
  4323. *
  4324. * Issue ioctl functions to devices. This is normally called by the
  4325. * user space syscall interfaces but can sometimes be useful for
  4326. * other purposes. The return value is the return from the syscall if
  4327. * positive or a negative errno code on error.
  4328. */
  4329. int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
  4330. {
  4331. struct ifreq ifr;
  4332. int ret;
  4333. char *colon;
4334. /* One special case: SIOCGIFCONF takes an ifconf argument
4335. and requires a shared lock, because it sleeps while writing
4336. to user space.
  4337. */
  4338. if (cmd == SIOCGIFCONF) {
  4339. rtnl_lock();
  4340. ret = dev_ifconf(net, (char __user *) arg);
  4341. rtnl_unlock();
  4342. return ret;
  4343. }
  4344. if (cmd == SIOCGIFNAME)
  4345. return dev_ifname(net, (struct ifreq __user *)arg);
  4346. if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
  4347. return -EFAULT;
  4348. ifr.ifr_name[IFNAMSIZ-1] = 0;
  4349. colon = strchr(ifr.ifr_name, ':');
  4350. if (colon)
  4351. *colon = 0;
  4352. /*
  4353. * See which interface the caller is talking about.
  4354. */
  4355. switch (cmd) {
  4356. /*
  4357. * These ioctl calls:
  4358. * - can be done by all.
4359. * - are atomic and do not require locking.
  4360. * - return a value
  4361. */
  4362. case SIOCGIFFLAGS:
  4363. case SIOCGIFMETRIC:
  4364. case SIOCGIFMTU:
  4365. case SIOCGIFHWADDR:
  4366. case SIOCGIFSLAVE:
  4367. case SIOCGIFMAP:
  4368. case SIOCGIFINDEX:
  4369. case SIOCGIFTXQLEN:
  4370. dev_load(net, ifr.ifr_name);
  4371. rcu_read_lock();
  4372. ret = dev_ifsioc_locked(net, &ifr, cmd);
  4373. rcu_read_unlock();
  4374. if (!ret) {
  4375. if (colon)
  4376. *colon = ':';
  4377. if (copy_to_user(arg, &ifr,
  4378. sizeof(struct ifreq)))
  4379. ret = -EFAULT;
  4380. }
  4381. return ret;
  4382. case SIOCETHTOOL:
  4383. dev_load(net, ifr.ifr_name);
  4384. rtnl_lock();
  4385. ret = dev_ethtool(net, &ifr);
  4386. rtnl_unlock();
  4387. if (!ret) {
  4388. if (colon)
  4389. *colon = ':';
  4390. if (copy_to_user(arg, &ifr,
  4391. sizeof(struct ifreq)))
  4392. ret = -EFAULT;
  4393. }
  4394. return ret;
  4395. /*
  4396. * These ioctl calls:
  4397. * - require superuser power.
  4398. * - require strict serialization.
  4399. * - return a value
  4400. */
  4401. case SIOCGMIIPHY:
  4402. case SIOCGMIIREG:
  4403. case SIOCSIFNAME:
  4404. if (!capable(CAP_NET_ADMIN))
  4405. return -EPERM;
  4406. dev_load(net, ifr.ifr_name);
  4407. rtnl_lock();
  4408. ret = dev_ifsioc(net, &ifr, cmd);
  4409. rtnl_unlock();
  4410. if (!ret) {
  4411. if (colon)
  4412. *colon = ':';
  4413. if (copy_to_user(arg, &ifr,
  4414. sizeof(struct ifreq)))
  4415. ret = -EFAULT;
  4416. }
  4417. return ret;
  4418. /*
  4419. * These ioctl calls:
  4420. * - require superuser power.
  4421. * - require strict serialization.
  4422. * - do not return a value
  4423. */
  4424. case SIOCSIFFLAGS:
  4425. case SIOCSIFMETRIC:
  4426. case SIOCSIFMTU:
  4427. case SIOCSIFMAP:
  4428. case SIOCSIFHWADDR:
  4429. case SIOCSIFSLAVE:
  4430. case SIOCADDMULTI:
  4431. case SIOCDELMULTI:
  4432. case SIOCSIFHWBROADCAST:
  4433. case SIOCSIFTXQLEN:
  4434. case SIOCSMIIREG:
  4435. case SIOCBONDENSLAVE:
  4436. case SIOCBONDRELEASE:
  4437. case SIOCBONDSETHWADDR:
  4438. case SIOCBONDCHANGEACTIVE:
  4439. case SIOCBRADDIF:
  4440. case SIOCBRDELIF:
  4441. case SIOCSHWTSTAMP:
  4442. if (!capable(CAP_NET_ADMIN))
  4443. return -EPERM;
  4444. /* fall through */
  4445. case SIOCBONDSLAVEINFOQUERY:
  4446. case SIOCBONDINFOQUERY:
  4447. dev_load(net, ifr.ifr_name);
  4448. rtnl_lock();
  4449. ret = dev_ifsioc(net, &ifr, cmd);
  4450. rtnl_unlock();
  4451. return ret;
  4452. case SIOCGIFMEM:
  4453. /* Get the per device memory space. We can add this but
  4454. * currently do not support it */
  4455. case SIOCSIFMEM:
  4456. /* Set the per device memory buffer space.
  4457. * Not applicable in our case */
  4458. case SIOCSIFLINK:
  4459. return -ENOTTY;
  4460. /*
  4461. * Unknown or private ioctl.
  4462. */
  4463. default:
  4464. if (cmd == SIOCWANDEV ||
  4465. (cmd >= SIOCDEVPRIVATE &&
  4466. cmd <= SIOCDEVPRIVATE + 15)) {
  4467. dev_load(net, ifr.ifr_name);
  4468. rtnl_lock();
  4469. ret = dev_ifsioc(net, &ifr, cmd);
  4470. rtnl_unlock();
  4471. if (!ret && copy_to_user(arg, &ifr,
  4472. sizeof(struct ifreq)))
  4473. ret = -EFAULT;
  4474. return ret;
  4475. }
  4476. /* Take care of Wireless Extensions */
  4477. if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
  4478. return wext_handle_ioctl(net, &ifr, cmd, arg);
  4479. return -ENOTTY;
  4480. }
  4481. }
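/*
 * Illustrative user-space sketch (not part of dev.c): the dev_ioctl()
 * paths above are normally reached through an ioctl() on any socket.
 * The snippet below, kept inside this comment because it is user-space
 * code, reads the MTU of "eth0" with SIOCGIFMTU; the interface name is
 * an assumption for the example.
 *
 * #include <stdio.h>
 * #include <string.h>
 * #include <unistd.h>
 * #include <sys/ioctl.h>
 * #include <sys/socket.h>
 * #include <net/if.h>
 *
 * int main(void)
 * {
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	if (fd < 0)
 *		return 1;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
 *	close(fd);
 *	return 0;
 * }
 */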
  4482. /**
  4483. * dev_new_index - allocate an ifindex
  4484. * @net: the applicable net namespace
  4485. *
  4486. * Returns a suitable unique value for a new device interface
  4487. * number. The caller must hold the rtnl semaphore or the
  4488. * dev_base_lock to be sure it remains unique.
  4489. */
  4490. static int dev_new_index(struct net *net)
  4491. {
  4492. static int ifindex;
  4493. for (;;) {
  4494. if (++ifindex <= 0)
  4495. ifindex = 1;
  4496. if (!__dev_get_by_index(net, ifindex))
  4497. return ifindex;
  4498. }
  4499. }
4500. /* Delayed registration/unregistration */
  4501. static LIST_HEAD(net_todo_list);
  4502. static void net_set_todo(struct net_device *dev)
  4503. {
  4504. list_add_tail(&dev->todo_list, &net_todo_list);
  4505. }
  4506. static void rollback_registered_many(struct list_head *head)
  4507. {
  4508. struct net_device *dev, *tmp;
  4509. BUG_ON(dev_boot_phase);
  4510. ASSERT_RTNL();
  4511. list_for_each_entry_safe(dev, tmp, head, unreg_list) {
4512. /* Some devices call this without ever having been
4513. * registered, as part of unwinding a failed initialization.
4514. * Remove those devices and proceed with the remaining ones.
  4515. */
  4516. if (dev->reg_state == NETREG_UNINITIALIZED) {
  4517. pr_debug("unregister_netdevice: device %s/%p never "
  4518. "was registered\n", dev->name, dev);
  4519. WARN_ON(1);
  4520. list_del(&dev->unreg_list);
  4521. continue;
  4522. }
  4523. dev->dismantle = true;
  4524. BUG_ON(dev->reg_state != NETREG_REGISTERED);
  4525. }
  4526. /* If device is running, close it first. */
  4527. dev_close_many(head);
  4528. list_for_each_entry(dev, head, unreg_list) {
  4529. /* And unlink it from device chain. */
  4530. unlist_netdevice(dev);
  4531. dev->reg_state = NETREG_UNREGISTERING;
  4532. }
  4533. synchronize_net();
  4534. list_for_each_entry(dev, head, unreg_list) {
  4535. /* Shutdown queueing discipline. */
  4536. dev_shutdown(dev);
4537. /* Notify protocols that we are about to destroy
4538. this device. They should clean up all of their state.
  4539. */
  4540. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  4541. if (!dev->rtnl_link_ops ||
  4542. dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
  4543. rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
  4544. /*
  4545. * Flush the unicast and multicast chains
  4546. */
  4547. dev_uc_flush(dev);
  4548. dev_mc_flush(dev);
  4549. if (dev->netdev_ops->ndo_uninit)
  4550. dev->netdev_ops->ndo_uninit(dev);
  4551. /* Notifier chain MUST detach us from master device. */
  4552. WARN_ON(dev->master);
  4553. /* Remove entries from kobject tree */
  4554. netdev_unregister_kobject(dev);
  4555. }
  4556. /* Process any work delayed until the end of the batch */
  4557. dev = list_first_entry(head, struct net_device, unreg_list);
  4558. call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
  4559. synchronize_net();
  4560. list_for_each_entry(dev, head, unreg_list)
  4561. dev_put(dev);
  4562. }
  4563. static void rollback_registered(struct net_device *dev)
  4564. {
  4565. LIST_HEAD(single);
  4566. list_add(&dev->unreg_list, &single);
  4567. rollback_registered_many(&single);
  4568. list_del(&single);
  4569. }
  4570. static u32 netdev_fix_features(struct net_device *dev, u32 features)
  4571. {
  4572. /* Fix illegal checksum combinations */
  4573. if ((features & NETIF_F_HW_CSUM) &&
  4574. (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
  4575. netdev_warn(dev, "mixed HW and IP checksum settings.\n");
  4576. features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  4577. }
  4578. if ((features & NETIF_F_NO_CSUM) &&
  4579. (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
  4580. netdev_warn(dev, "mixed no checksumming and other settings.\n");
  4581. features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
  4582. }
  4583. /* Fix illegal SG+CSUM combinations. */
  4584. if ((features & NETIF_F_SG) &&
  4585. !(features & NETIF_F_ALL_CSUM)) {
  4586. netdev_dbg(dev,
  4587. "Dropping NETIF_F_SG since no checksum feature.\n");
  4588. features &= ~NETIF_F_SG;
  4589. }
  4590. /* TSO requires that SG is present as well. */
  4591. if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
  4592. netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
  4593. features &= ~NETIF_F_ALL_TSO;
  4594. }
  4595. /* TSO ECN requires that TSO is present as well. */
  4596. if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
  4597. features &= ~NETIF_F_TSO_ECN;
  4598. /* Software GSO depends on SG. */
  4599. if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
  4600. netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
  4601. features &= ~NETIF_F_GSO;
  4602. }
  4603. /* UFO needs SG and checksumming */
  4604. if (features & NETIF_F_UFO) {
  4605. /* maybe split UFO into V4 and V6? */
  4606. if (!((features & NETIF_F_GEN_CSUM) ||
  4607. (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
  4608. == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
  4609. netdev_dbg(dev,
  4610. "Dropping NETIF_F_UFO since no checksum offload features.\n");
  4611. features &= ~NETIF_F_UFO;
  4612. }
  4613. if (!(features & NETIF_F_SG)) {
  4614. netdev_dbg(dev,
  4615. "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
  4616. features &= ~NETIF_F_UFO;
  4617. }
  4618. }
  4619. return features;
  4620. }
  4621. int __netdev_update_features(struct net_device *dev)
  4622. {
  4623. u32 features;
  4624. int err = 0;
  4625. ASSERT_RTNL();
  4626. features = netdev_get_wanted_features(dev);
  4627. if (dev->netdev_ops->ndo_fix_features)
  4628. features = dev->netdev_ops->ndo_fix_features(dev, features);
  4629. /* driver might be less strict about feature dependencies */
  4630. features = netdev_fix_features(dev, features);
  4631. if (dev->features == features)
  4632. return 0;
  4633. netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
  4634. dev->features, features);
  4635. if (dev->netdev_ops->ndo_set_features)
  4636. err = dev->netdev_ops->ndo_set_features(dev, features);
  4637. if (unlikely(err < 0)) {
  4638. netdev_err(dev,
  4639. "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
  4640. err, features, dev->features);
  4641. return -1;
  4642. }
  4643. if (!err)
  4644. dev->features = features;
  4645. return 1;
  4646. }
  4647. /**
  4648. * netdev_update_features - recalculate device features
  4649. * @dev: the device to check
  4650. *
  4651. * Recalculate dev->features set and send notifications if it
  4652. * has changed. Should be called after driver or hardware dependent
  4653. * conditions might have changed that influence the features.
  4654. */
  4655. void netdev_update_features(struct net_device *dev)
  4656. {
  4657. if (__netdev_update_features(dev))
  4658. netdev_features_change(dev);
  4659. }
  4660. EXPORT_SYMBOL(netdev_update_features);
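/*
 * Illustrative sketch (not part of dev.c): one way for a driver to switch
 * a togglable offload off at run time is to clear it from wanted_features
 * and let the core recompute dev->features. Must run under the RTNL lock;
 * NETIF_F_SG is used purely as an example feature bit.
 */
static void example_drop_sg(struct net_device *dev)
{
	rtnl_lock();
	dev->wanted_features &= ~NETIF_F_SG;
	netdev_update_features(dev);	/* recompute and notify if changed */
	rtnl_unlock();
}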
  4661. /**
  4662. * netdev_change_features - recalculate device features
  4663. * @dev: the device to check
  4664. *
  4665. * Recalculate dev->features set and send notifications even
  4666. * if they have not changed. Should be called instead of
  4667. * netdev_update_features() if also dev->vlan_features might
  4668. * have changed to allow the changes to be propagated to stacked
  4669. * VLAN devices.
  4670. */
  4671. void netdev_change_features(struct net_device *dev)
  4672. {
  4673. __netdev_update_features(dev);
  4674. netdev_features_change(dev);
  4675. }
  4676. EXPORT_SYMBOL(netdev_change_features);
  4677. /**
  4678. * netif_stacked_transfer_operstate - transfer operstate
  4679. * @rootdev: the root or lower level device to transfer state from
  4680. * @dev: the device to transfer operstate to
  4681. *
  4682. * Transfer operational state from root to device. This is normally
  4683. * called when a stacking relationship exists between the root
4684. * device and the device (a leaf device).
  4685. */
  4686. void netif_stacked_transfer_operstate(const struct net_device *rootdev,
  4687. struct net_device *dev)
  4688. {
  4689. if (rootdev->operstate == IF_OPER_DORMANT)
  4690. netif_dormant_on(dev);
  4691. else
  4692. netif_dormant_off(dev);
  4693. if (netif_carrier_ok(rootdev)) {
  4694. if (!netif_carrier_ok(dev))
  4695. netif_carrier_on(dev);
  4696. } else {
  4697. if (netif_carrier_ok(dev))
  4698. netif_carrier_off(dev);
  4699. }
  4700. }
  4701. EXPORT_SYMBOL(netif_stacked_transfer_operstate);
  4702. #ifdef CONFIG_RPS
  4703. static int netif_alloc_rx_queues(struct net_device *dev)
  4704. {
  4705. unsigned int i, count = dev->num_rx_queues;
  4706. struct netdev_rx_queue *rx;
  4707. BUG_ON(count < 1);
  4708. rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
  4709. if (!rx) {
  4710. pr_err("netdev: Unable to allocate %u rx queues.\n", count);
  4711. return -ENOMEM;
  4712. }
  4713. dev->_rx = rx;
  4714. for (i = 0; i < count; i++)
  4715. rx[i].dev = dev;
  4716. return 0;
  4717. }
  4718. #endif
  4719. static void netdev_init_one_queue(struct net_device *dev,
  4720. struct netdev_queue *queue, void *_unused)
  4721. {
  4722. /* Initialize queue lock */
  4723. spin_lock_init(&queue->_xmit_lock);
  4724. netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
  4725. queue->xmit_lock_owner = -1;
  4726. netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
  4727. queue->dev = dev;
  4728. }
  4729. static int netif_alloc_netdev_queues(struct net_device *dev)
  4730. {
  4731. unsigned int count = dev->num_tx_queues;
  4732. struct netdev_queue *tx;
  4733. BUG_ON(count < 1);
  4734. tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
  4735. if (!tx) {
  4736. pr_err("netdev: Unable to allocate %u tx queues.\n",
  4737. count);
  4738. return -ENOMEM;
  4739. }
  4740. dev->_tx = tx;
  4741. netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
  4742. spin_lock_init(&dev->tx_global_lock);
  4743. return 0;
  4744. }
  4745. /**
  4746. * register_netdevice - register a network device
  4747. * @dev: device to register
  4748. *
  4749. * Take a completed network device structure and add it to the kernel
  4750. * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
  4751. * chain. 0 is returned on success. A negative errno code is returned
  4752. * on a failure to set up the device, or if the name is a duplicate.
  4753. *
  4754. * Callers must hold the rtnl semaphore. You may want
  4755. * register_netdev() instead of this.
  4756. *
  4757. * BUGS:
  4758. * The locking appears insufficient to guarantee two parallel registers
  4759. * will not get the same name.
  4760. */
  4761. int register_netdevice(struct net_device *dev)
  4762. {
  4763. int ret;
  4764. struct net *net = dev_net(dev);
  4765. BUG_ON(dev_boot_phase);
  4766. ASSERT_RTNL();
  4767. might_sleep();
  4768. /* When net_device's are persistent, this will be fatal. */
  4769. BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
  4770. BUG_ON(!net);
  4771. spin_lock_init(&dev->addr_list_lock);
  4772. netdev_set_addr_lockdep_class(dev);
  4773. dev->iflink = -1;
  4774. ret = dev_get_valid_name(dev, dev->name);
  4775. if (ret < 0)
  4776. goto out;
  4777. /* Init, if this function is available */
  4778. if (dev->netdev_ops->ndo_init) {
  4779. ret = dev->netdev_ops->ndo_init(dev);
  4780. if (ret) {
  4781. if (ret > 0)
  4782. ret = -EIO;
  4783. goto out;
  4784. }
  4785. }
  4786. dev->ifindex = dev_new_index(net);
  4787. if (dev->iflink == -1)
  4788. dev->iflink = dev->ifindex;
  4789. /* Transfer changeable features to wanted_features and enable
  4790. * software offloads (GSO and GRO).
  4791. */
  4792. dev->hw_features |= NETIF_F_SOFT_FEATURES;
  4793. dev->features |= NETIF_F_SOFT_FEATURES;
  4794. dev->wanted_features = dev->features & dev->hw_features;
  4795. /* Turn on no cache copy if HW is doing checksum */
  4796. dev->hw_features |= NETIF_F_NOCACHE_COPY;
  4797. if ((dev->features & NETIF_F_ALL_CSUM) &&
  4798. !(dev->features & NETIF_F_NO_CSUM)) {
  4799. dev->wanted_features |= NETIF_F_NOCACHE_COPY;
  4800. dev->features |= NETIF_F_NOCACHE_COPY;
  4801. }
  4802. /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
  4803. */
  4804. dev->vlan_features |= NETIF_F_HIGHDMA;
  4805. ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
  4806. ret = notifier_to_errno(ret);
  4807. if (ret)
  4808. goto err_uninit;
  4809. ret = netdev_register_kobject(dev);
  4810. if (ret)
  4811. goto err_uninit;
  4812. dev->reg_state = NETREG_REGISTERED;
  4813. __netdev_update_features(dev);
  4814. /*
  4815. * Default initial state at registry is that the
  4816. * device is present.
  4817. */
  4818. set_bit(__LINK_STATE_PRESENT, &dev->state);
  4819. dev_init_scheduler(dev);
  4820. dev_hold(dev);
  4821. list_netdevice(dev);
  4822. /* Notify protocols, that a new device appeared. */
  4823. ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
  4824. ret = notifier_to_errno(ret);
  4825. if (ret) {
  4826. rollback_registered(dev);
  4827. dev->reg_state = NETREG_UNREGISTERED;
  4828. }
  4829. /*
  4830. * Prevent userspace races by waiting until the network
  4831. * device is fully setup before sending notifications.
  4832. */
  4833. if (!dev->rtnl_link_ops ||
  4834. dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
  4835. rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
  4836. out:
  4837. return ret;
  4838. err_uninit:
  4839. if (dev->netdev_ops->ndo_uninit)
  4840. dev->netdev_ops->ndo_uninit(dev);
  4841. goto out;
  4842. }
  4843. EXPORT_SYMBOL(register_netdevice);
  4844. /**
  4845. * init_dummy_netdev - init a dummy network device for NAPI
  4846. * @dev: device to init
  4847. *
4848. * This takes a network device structure and initializes the minimum
4849. * number of fields so it can be used to schedule NAPI polls without
4850. * registering a full-blown interface. This is to be used by drivers
  4851. * that need to tie several hardware interfaces to a single NAPI
  4852. * poll scheduler due to HW limitations.
  4853. */
  4854. int init_dummy_netdev(struct net_device *dev)
  4855. {
  4856. /* Clear everything. Note we don't initialize spinlocks
4857. * as they aren't supposed to be taken by any of the
4858. * NAPI code and this dummy netdev is supposed to be
4859. * used only for NAPI polls.
  4860. */
  4861. memset(dev, 0, sizeof(struct net_device));
  4862. /* make sure we BUG if trying to hit standard
  4863. * register/unregister code path
  4864. */
  4865. dev->reg_state = NETREG_DUMMY;
  4866. /* NAPI wants this */
  4867. INIT_LIST_HEAD(&dev->napi_list);
  4868. /* a dummy interface is started by default */
  4869. set_bit(__LINK_STATE_PRESENT, &dev->state);
  4870. set_bit(__LINK_STATE_START, &dev->state);
4871. /* Note: we don't allocate pcpu_refcnt for dummy devices,
4872. * because users of this 'device' don't need to change
  4873. * its refcount.
  4874. */
  4875. return 0;
  4876. }
  4877. EXPORT_SYMBOL_GPL(init_dummy_netdev);
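/*
 * Illustrative sketch (not part of dev.c): a driver tying its hardware to
 * a single NAPI context through a dummy netdev, as the comment above
 * describes. The embedded structures, poll routine and weight of 64 are
 * assumptions for the example.
 */
struct example_adapter {
	struct net_device dummy_dev;	/* never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* ... process up to @budget packets, then napi_complete() ... */
	return 0;
}

static void example_adapter_init(struct example_adapter *ad)
{
	init_dummy_netdev(&ad->dummy_dev);
	netif_napi_add(&ad->dummy_dev, &ad->napi, example_poll, 64);
	napi_enable(&ad->napi);
}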
  4878. /**
  4879. * register_netdev - register a network device
  4880. * @dev: device to register
  4881. *
  4882. * Take a completed network device structure and add it to the kernel
  4883. * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
  4884. * chain. 0 is returned on success. A negative errno code is returned
  4885. * on a failure to set up the device, or if the name is a duplicate.
  4886. *
  4887. * This is a wrapper around register_netdevice that takes the rtnl semaphore
  4888. * and expands the device name if you passed a format string to
  4889. * alloc_netdev.
  4890. */
  4891. int register_netdev(struct net_device *dev)
  4892. {
  4893. int err;
  4894. rtnl_lock();
  4895. err = register_netdevice(dev);
  4896. rtnl_unlock();
  4897. return err;
  4898. }
  4899. EXPORT_SYMBOL(register_netdev);
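/*
 * Illustrative sketch (not part of dev.c): the usual driver life cycle
 * built on alloc_netdev_mqs(), register_netdev() and the matching
 * teardown. struct example_card, example_setup() and the "example%d"
 * name template are assumptions for the example; a real driver would
 * also install its net_device_ops before registering.
 */
struct example_card {
	int dummy;			/* hypothetical per-device state */
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);		/* Ethernet-style defaults */
}

static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mqs(sizeof(struct example_card), "example%d",
			       example_setup, 1, 1);
	if (!dev)
		return NULL;
	if (register_netdev(dev)) {	/* takes and releases the RTNL lock */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}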
  4900. int netdev_refcnt_read(const struct net_device *dev)
  4901. {
  4902. int i, refcnt = 0;
  4903. for_each_possible_cpu(i)
  4904. refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
  4905. return refcnt;
  4906. }
  4907. EXPORT_SYMBOL(netdev_refcnt_read);
  4908. /*
  4909. * netdev_wait_allrefs - wait until all references are gone.
  4910. *
  4911. * This is called when unregistering network devices.
  4912. *
  4913. * Any protocol or device that holds a reference should register
4914. * for netdevice notification, and clean up and put back the
  4915. * reference if they receive an UNREGISTER event.
  4916. * We can get stuck here if buggy protocols don't correctly
  4917. * call dev_put.
  4918. */
  4919. static void netdev_wait_allrefs(struct net_device *dev)
  4920. {
  4921. unsigned long rebroadcast_time, warning_time;
  4922. int refcnt;
  4923. linkwatch_forget_dev(dev);
  4924. rebroadcast_time = warning_time = jiffies;
  4925. refcnt = netdev_refcnt_read(dev);
  4926. while (refcnt != 0) {
  4927. if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
  4928. rtnl_lock();
  4929. /* Rebroadcast unregister notification */
  4930. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  4931. /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
4932. * should have already handled it the first time */
  4933. if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
  4934. &dev->state)) {
  4935. /* We must not have linkwatch events
  4936. * pending on unregister. If this
  4937. * happens, we simply run the queue
  4938. * unscheduled, resulting in a noop
  4939. * for this device.
  4940. */
  4941. linkwatch_run_queue();
  4942. }
  4943. __rtnl_unlock();
  4944. rebroadcast_time = jiffies;
  4945. }
  4946. msleep(250);
  4947. refcnt = netdev_refcnt_read(dev);
  4948. if (time_after(jiffies, warning_time + 10 * HZ)) {
  4949. printk(KERN_EMERG "unregister_netdevice: "
  4950. "waiting for %s to become free. Usage "
  4951. "count = %d\n",
  4952. dev->name, refcnt);
  4953. warning_time = jiffies;
  4954. }
  4955. }
  4956. }
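/*
 * Illustrative sketch (not part of dev.c): the pattern the comment above
 * expects from reference holders - register a netdevice notifier and drop
 * the reference on NETDEV_UNREGISTER so netdev_wait_allrefs() can finish.
 * example_held_dev is a hypothetical global holding a dev_hold() reference;
 * in this kernel the notifier's data pointer is the net_device itself.
 */
static struct net_device *example_held_dev;

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_held_dev) {
		dev_put(example_held_dev);
		example_held_dev = NULL;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};
/* registered with register_netdevice_notifier(&example_netdev_notifier) */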
  4957. /* The sequence is:
  4958. *
  4959. * rtnl_lock();
  4960. * ...
  4961. * register_netdevice(x1);
  4962. * register_netdevice(x2);
  4963. * ...
  4964. * unregister_netdevice(y1);
  4965. * unregister_netdevice(y2);
  4966. * ...
  4967. * rtnl_unlock();
  4968. * free_netdev(y1);
  4969. * free_netdev(y2);
  4970. *
  4971. * We are invoked by rtnl_unlock().
  4972. * This allows us to deal with problems:
  4973. * 1) We can delete sysfs objects which invoke hotplug
  4974. * without deadlocking with linkwatch via keventd.
  4975. * 2) Since we run with the RTNL semaphore not held, we can sleep
  4976. * safely in order to wait for the netdev refcnt to drop to zero.
  4977. *
  4978. * We must not return until all unregister events added during
  4979. * the interval the lock was held have been completed.
  4980. */
  4981. void netdev_run_todo(void)
  4982. {
  4983. struct list_head list;
  4984. /* Snapshot list, allow later requests */
  4985. list_replace_init(&net_todo_list, &list);
  4986. __rtnl_unlock();
  4987. /* Wait for rcu callbacks to finish before attempting to drain
  4988. * the device list. This usually avoids a 250ms wait.
  4989. */
  4990. if (!list_empty(&list))
  4991. rcu_barrier();
  4992. while (!list_empty(&list)) {
  4993. struct net_device *dev
  4994. = list_first_entry(&list, struct net_device, todo_list);
  4995. list_del(&dev->todo_list);
  4996. if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
  4997. printk(KERN_ERR "network todo '%s' but state %d\n",
  4998. dev->name, dev->reg_state);
  4999. dump_stack();
  5000. continue;
  5001. }
  5002. dev->reg_state = NETREG_UNREGISTERED;
  5003. on_each_cpu(flush_backlog, dev, 1);
  5004. netdev_wait_allrefs(dev);
  5005. /* paranoia */
  5006. BUG_ON(netdev_refcnt_read(dev));
  5007. WARN_ON(rcu_access_pointer(dev->ip_ptr));
  5008. WARN_ON(rcu_access_pointer(dev->ip6_ptr));
  5009. WARN_ON(dev->dn_ptr);
  5010. if (dev->destructor)
  5011. dev->destructor(dev);
  5012. /* Free network device */
  5013. kobject_put(&dev->dev.kobj);
  5014. }
  5015. }
  5016. /* Convert net_device_stats to rtnl_link_stats64. They have the same
  5017. * fields in the same order, with only the type differing.
  5018. */
  5019. static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
  5020. const struct net_device_stats *netdev_stats)
  5021. {
  5022. #if BITS_PER_LONG == 64
  5023. BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
  5024. memcpy(stats64, netdev_stats, sizeof(*stats64));
  5025. #else
  5026. size_t i, n = sizeof(*stats64) / sizeof(u64);
  5027. const unsigned long *src = (const unsigned long *)netdev_stats;
  5028. u64 *dst = (u64 *)stats64;
  5029. BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
  5030. sizeof(*stats64) / sizeof(u64));
  5031. for (i = 0; i < n; i++)
  5032. dst[i] = src[i];
  5033. #endif
  5034. }
  5035. /**
  5036. * dev_get_stats - get network device statistics
  5037. * @dev: device to get statistics from
  5038. * @storage: place to store stats
  5039. *
  5040. * Get network statistics from device. Return @storage.
  5041. * The device driver may provide its own method by setting
  5042. * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
  5043. * otherwise the internal statistics structure is used.
  5044. */
  5045. struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
  5046. struct rtnl_link_stats64 *storage)
  5047. {
  5048. const struct net_device_ops *ops = dev->netdev_ops;
  5049. if (ops->ndo_get_stats64) {
  5050. memset(storage, 0, sizeof(*storage));
  5051. ops->ndo_get_stats64(dev, storage);
  5052. } else if (ops->ndo_get_stats) {
  5053. netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
  5054. } else {
  5055. netdev_stats_to_stats64(storage, &dev->stats);
  5056. }
  5057. storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
  5058. return storage;
  5059. }
  5060. EXPORT_SYMBOL(dev_get_stats);
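/*
 * Illustrative sketch (not part of dev.c): snapshotting a device's counters
 * with dev_get_stats(). @storage lives on the caller's stack, so the result
 * stays valid after the call returns; the netdev_info() message is just an
 * example.
 */
static void example_log_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx %llu pkts / tx %llu pkts\n",
		    (unsigned long long)stats.rx_packets,
		    (unsigned long long)stats.tx_packets);
}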
  5061. struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
  5062. {
  5063. struct netdev_queue *queue = dev_ingress_queue(dev);
  5064. #ifdef CONFIG_NET_CLS_ACT
  5065. if (queue)
  5066. return queue;
  5067. queue = kzalloc(sizeof(*queue), GFP_KERNEL);
  5068. if (!queue)
  5069. return NULL;
  5070. netdev_init_one_queue(dev, queue, NULL);
  5071. queue->qdisc = &noop_qdisc;
  5072. queue->qdisc_sleeping = &noop_qdisc;
  5073. rcu_assign_pointer(dev->ingress_queue, queue);
  5074. #endif
  5075. return queue;
  5076. }
  5077. /**
  5078. * alloc_netdev_mqs - allocate network device
  5079. * @sizeof_priv: size of private data to allocate space for
  5080. * @name: device name format string
  5081. * @setup: callback to initialize device
  5082. * @txqs: the number of TX subqueues to allocate
  5083. * @rxqs: the number of RX subqueues to allocate
  5084. *
  5085. * Allocates a struct net_device with private data area for driver use
5086. * and performs basic initialization. Also allocates subqueue structs
  5087. * for each queue on the device.
  5088. */
  5089. struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
  5090. void (*setup)(struct net_device *),
  5091. unsigned int txqs, unsigned int rxqs)
  5092. {
  5093. struct net_device *dev;
  5094. size_t alloc_size;
  5095. struct net_device *p;
  5096. BUG_ON(strlen(name) >= sizeof(dev->name));
  5097. if (txqs < 1) {
  5098. pr_err("alloc_netdev: Unable to allocate device "
  5099. "with zero queues.\n");
  5100. return NULL;
  5101. }
  5102. #ifdef CONFIG_RPS
  5103. if (rxqs < 1) {
  5104. pr_err("alloc_netdev: Unable to allocate device "
  5105. "with zero RX queues.\n");
  5106. return NULL;
  5107. }
  5108. #endif
  5109. alloc_size = sizeof(struct net_device);
  5110. if (sizeof_priv) {
  5111. /* ensure 32-byte alignment of private area */
  5112. alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
  5113. alloc_size += sizeof_priv;
  5114. }
  5115. /* ensure 32-byte alignment of whole construct */
  5116. alloc_size += NETDEV_ALIGN - 1;
  5117. p = kzalloc(alloc_size, GFP_KERNEL);
  5118. if (!p) {
  5119. printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
  5120. return NULL;
  5121. }
  5122. dev = PTR_ALIGN(p, NETDEV_ALIGN);
  5123. dev->padded = (char *)dev - (char *)p;
  5124. dev->pcpu_refcnt = alloc_percpu(int);
  5125. if (!dev->pcpu_refcnt)
  5126. goto free_p;
  5127. if (dev_addr_init(dev))
  5128. goto free_pcpu;
  5129. dev_mc_init(dev);
  5130. dev_uc_init(dev);
  5131. dev_net_set(dev, &init_net);
  5132. dev->gso_max_size = GSO_MAX_SIZE;
  5133. INIT_LIST_HEAD(&dev->napi_list);
  5134. INIT_LIST_HEAD(&dev->unreg_list);
  5135. INIT_LIST_HEAD(&dev->link_watch_list);
  5136. dev->priv_flags = IFF_XMIT_DST_RELEASE;
  5137. setup(dev);
  5138. dev->num_tx_queues = txqs;
  5139. dev->real_num_tx_queues = txqs;
  5140. if (netif_alloc_netdev_queues(dev))
  5141. goto free_all;
  5142. #ifdef CONFIG_RPS
  5143. dev->num_rx_queues = rxqs;
  5144. dev->real_num_rx_queues = rxqs;
  5145. if (netif_alloc_rx_queues(dev))
  5146. goto free_all;
  5147. #endif
  5148. strcpy(dev->name, name);
  5149. dev->group = INIT_NETDEV_GROUP;
  5150. return dev;
  5151. free_all:
  5152. free_netdev(dev);
  5153. return NULL;
  5154. free_pcpu:
  5155. free_percpu(dev->pcpu_refcnt);
  5156. kfree(dev->_tx);
  5157. #ifdef CONFIG_RPS
  5158. kfree(dev->_rx);
  5159. #endif
  5160. free_p:
  5161. kfree(p);
  5162. return NULL;
  5163. }
  5164. EXPORT_SYMBOL(alloc_netdev_mqs);
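/*
 * Illustrative sketch (not part of dev.c): the private area reserved by
 * @sizeof_priv is reached through netdev_priv(), which accounts for the
 * NETDEV_ALIGN padding applied above. struct example_priv and the counter
 * are hypothetical driver state.
 */
struct example_priv {
	spinlock_t lock;
	u64 tx_errors;
};

static void example_count_tx_error(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	priv->tx_errors++;
	spin_unlock(&priv->lock);
}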
  5165. /**
  5166. * free_netdev - free network device
  5167. * @dev: device
  5168. *
  5169. * This function does the last stage of destroying an allocated device
  5170. * interface. The reference to the device object is released.
  5171. * If this is the last reference then it will be freed.
  5172. */
  5173. void free_netdev(struct net_device *dev)
  5174. {
  5175. struct napi_struct *p, *n;
  5176. release_net(dev_net(dev));
  5177. kfree(dev->_tx);
  5178. #ifdef CONFIG_RPS
  5179. kfree(dev->_rx);
  5180. #endif
  5181. kfree(rcu_dereference_protected(dev->ingress_queue, 1));
  5182. /* Flush device addresses */
  5183. dev_addr_flush(dev);
  5184. list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
  5185. netif_napi_del(p);
  5186. free_percpu(dev->pcpu_refcnt);
  5187. dev->pcpu_refcnt = NULL;
  5188. /* Compatibility with error handling in drivers */
  5189. if (dev->reg_state == NETREG_UNINITIALIZED) {
  5190. kfree((char *)dev - dev->padded);
  5191. return;
  5192. }
  5193. BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
  5194. dev->reg_state = NETREG_RELEASED;
  5195. /* will free via device release */
  5196. put_device(&dev->dev);
  5197. }
  5198. EXPORT_SYMBOL(free_netdev);
  5199. /**
  5200. * synchronize_net - Synchronize with packet receive processing
  5201. *
  5202. * Wait for packets currently being received to be done.
  5203. * Does not block later packets from starting.
  5204. */
  5205. void synchronize_net(void)
  5206. {
  5207. might_sleep();
  5208. if (rtnl_is_locked())
  5209. synchronize_rcu_expedited();
  5210. else
  5211. synchronize_rcu();
  5212. }
  5213. EXPORT_SYMBOL(synchronize_net);
  5214. /**
  5215. * unregister_netdevice_queue - remove device from the kernel
  5216. * @dev: device
  5217. * @head: list
  5218. *
  5219. * This function shuts down a device interface and removes it
  5220. * from the kernel tables.
5221. * If head is not NULL, the device is queued to be unregistered later.
  5222. *
  5223. * Callers must hold the rtnl semaphore. You may want
  5224. * unregister_netdev() instead of this.
  5225. */
  5226. void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
  5227. {
  5228. ASSERT_RTNL();
  5229. if (head) {
  5230. list_move_tail(&dev->unreg_list, head);
  5231. } else {
  5232. rollback_registered(dev);
  5233. /* Finish processing unregister after unlock */
  5234. net_set_todo(dev);
  5235. }
  5236. }
  5237. EXPORT_SYMBOL(unregister_netdevice_queue);
  5238. /**
  5239. * unregister_netdevice_many - unregister many devices
  5240. * @head: list of devices
  5241. */
  5242. void unregister_netdevice_many(struct list_head *head)
  5243. {
  5244. struct net_device *dev;
  5245. if (!list_empty(head)) {
  5246. rollback_registered_many(head);
  5247. list_for_each_entry(dev, head, unreg_list)
  5248. net_set_todo(dev);
  5249. }
  5250. }
  5251. EXPORT_SYMBOL(unregister_netdevice_many);
  5252. /**
  5253. * unregister_netdev - remove device from the kernel
  5254. * @dev: device
  5255. *
  5256. * This function shuts down a device interface and removes it
  5257. * from the kernel tables.
  5258. *
  5259. * This is just a wrapper for unregister_netdevice that takes
  5260. * the rtnl semaphore. In general you want to use this and not
  5261. * unregister_netdevice.
  5262. */
  5263. void unregister_netdev(struct net_device *dev)
  5264. {
  5265. rtnl_lock();
  5266. unregister_netdevice(dev);
  5267. rtnl_unlock();
  5268. }
  5269. EXPORT_SYMBOL(unregister_netdev);
  5270. /**
5271. * dev_change_net_namespace - move device to a different network namespace
  5272. * @dev: device
  5273. * @net: network namespace
  5274. * @pat: If not NULL name pattern to try if the current device name
  5275. * is already taken in the destination network namespace.
  5276. *
  5277. * This function shuts down a device interface and moves it
  5278. * to a new network namespace. On success 0 is returned, on
5279. * a failure a negative errno code is returned.
  5280. *
  5281. * Callers must hold the rtnl semaphore.
  5282. */
  5283. int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
  5284. {
  5285. int err;
  5286. ASSERT_RTNL();
  5287. /* Don't allow namespace local devices to be moved. */
  5288. err = -EINVAL;
  5289. if (dev->features & NETIF_F_NETNS_LOCAL)
  5290. goto out;
5291. /* Ensure the device has been registered */
  5292. err = -EINVAL;
  5293. if (dev->reg_state != NETREG_REGISTERED)
  5294. goto out;
5295. /* Get out if there is nothing to do */
  5296. err = 0;
  5297. if (net_eq(dev_net(dev), net))
  5298. goto out;
  5299. /* Pick the destination device name, and ensure
  5300. * we can use it in the destination network namespace.
  5301. */
  5302. err = -EEXIST;
  5303. if (__dev_get_by_name(net, dev->name)) {
  5304. /* We get here if we can't use the current device name */
  5305. if (!pat)
  5306. goto out;
  5307. if (dev_get_valid_name(dev, pat) < 0)
  5308. goto out;
  5309. }
  5310. /*
5311. * And now a mini version of register_netdevice and unregister_netdevice.
  5312. */
  5313. /* If device is running close it first. */
  5314. dev_close(dev);
  5315. /* And unlink it from device chain */
  5316. err = -ENODEV;
  5317. unlist_netdevice(dev);
  5318. synchronize_net();
  5319. /* Shutdown queueing discipline. */
  5320. dev_shutdown(dev);
5321. /* Notify protocols that we are about to destroy
5322. this device. They should clean up all of their state.
5323. Note that dev->reg_state stays at NETREG_REGISTERED.
5324. This is intentional: this way 8021q and macvlan know
5325. the device is just moving and can keep their slaves up.
  5326. */
  5327. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  5328. call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
  5329. rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
  5330. /*
  5331. * Flush the unicast and multicast chains
  5332. */
  5333. dev_uc_flush(dev);
  5334. dev_mc_flush(dev);
  5335. /* Actually switch the network namespace */
  5336. dev_net_set(dev, net);
  5337. /* If there is an ifindex conflict assign a new one */
  5338. if (__dev_get_by_index(net, dev->ifindex)) {
  5339. int iflink = (dev->iflink == dev->ifindex);
  5340. dev->ifindex = dev_new_index(net);
  5341. if (iflink)
  5342. dev->iflink = dev->ifindex;
  5343. }
  5344. /* Fixup kobjects */
  5345. err = device_rename(&dev->dev, dev->name);
  5346. WARN_ON(err);
  5347. /* Add the device back in the hashes */
  5348. list_netdevice(dev);
  5349. /* Notify protocols, that a new device appeared. */
  5350. call_netdevice_notifiers(NETDEV_REGISTER, dev);
  5351. /*
  5352. * Prevent userspace races by waiting until the network
  5353. * device is fully setup before sending notifications.
  5354. */
  5355. rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
  5356. synchronize_net();
  5357. err = 0;
  5358. out:
  5359. return err;
  5360. }
  5361. EXPORT_SYMBOL_GPL(dev_change_net_namespace);
  5362. static int dev_cpu_callback(struct notifier_block *nfb,
  5363. unsigned long action,
  5364. void *ocpu)
  5365. {
  5366. struct sk_buff **list_skb;
  5367. struct sk_buff *skb;
  5368. unsigned int cpu, oldcpu = (unsigned long)ocpu;
  5369. struct softnet_data *sd, *oldsd;
  5370. if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
  5371. return NOTIFY_OK;
  5372. local_irq_disable();
  5373. cpu = smp_processor_id();
  5374. sd = &per_cpu(softnet_data, cpu);
  5375. oldsd = &per_cpu(softnet_data, oldcpu);
  5376. /* Find end of our completion_queue. */
  5377. list_skb = &sd->completion_queue;
  5378. while (*list_skb)
  5379. list_skb = &(*list_skb)->next;
  5380. /* Append completion queue from offline CPU. */
  5381. *list_skb = oldsd->completion_queue;
  5382. oldsd->completion_queue = NULL;
  5383. /* Append output queue from offline CPU. */
  5384. if (oldsd->output_queue) {
  5385. *sd->output_queue_tailp = oldsd->output_queue;
  5386. sd->output_queue_tailp = oldsd->output_queue_tailp;
  5387. oldsd->output_queue = NULL;
  5388. oldsd->output_queue_tailp = &oldsd->output_queue;
  5389. }
  5390. /* Append NAPI poll list from offline CPU. */
  5391. if (!list_empty(&oldsd->poll_list)) {
  5392. list_splice_init(&oldsd->poll_list, &sd->poll_list);
  5393. raise_softirq_irqoff(NET_RX_SOFTIRQ);
  5394. }
  5395. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  5396. local_irq_enable();
  5397. /* Process offline CPU's input_pkt_queue */
  5398. while ((skb = __skb_dequeue(&oldsd->process_queue))) {
  5399. netif_rx(skb);
  5400. input_queue_head_incr(oldsd);
  5401. }
  5402. while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
  5403. netif_rx(skb);
  5404. input_queue_head_incr(oldsd);
  5405. }
  5406. return NOTIFY_OK;
  5407. }
  5408. /**
  5409. * netdev_increment_features - increment feature set by one
  5410. * @all: current feature set
  5411. * @one: new feature set
  5412. * @mask: mask feature set
  5413. *
  5414. * Computes a new feature set after adding a device with feature set
  5415. * @one to the master device with current feature set @all. Will not
  5416. * enable anything that is off in @mask. Returns the new feature set.
  5417. */
  5418. u32 netdev_increment_features(u32 all, u32 one, u32 mask)
  5419. {
  5420. if (mask & NETIF_F_GEN_CSUM)
  5421. mask |= NETIF_F_ALL_CSUM;
  5422. mask |= NETIF_F_VLAN_CHALLENGED;
  5423. all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
  5424. all &= one | ~NETIF_F_ALL_FOR_ALL;
  5425. /* If device needs checksumming, downgrade to it. */
  5426. if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
  5427. all &= ~NETIF_F_NO_CSUM;
  5428. /* If one device supports hw checksumming, set for all. */
  5429. if (all & NETIF_F_GEN_CSUM)
  5430. all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
  5431. return all;
  5432. }
  5433. EXPORT_SYMBOL(netdev_increment_features);
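/*
 * Illustrative sketch (not part of dev.c): how a master device (bridge,
 * bond, ...) might fold the features of its slaves together with
 * netdev_increment_features(). The slave array is schematic; @mask is
 * typically the feature set the master is willing to offer at all.
 */
static u32 example_compute_master_features(struct net_device **slaves,
					   int n, u32 mask)
{
	/* ONE_FOR_ALL bits start off and are OR'ed in from the slaves;
	 * ALL_FOR_ALL bits start on and are AND'ed away by the slaves. */
	u32 all = mask & ~NETIF_F_ONE_FOR_ALL;
	int i;

	for (i = 0; i < n; i++)
		all = netdev_increment_features(all, slaves[i]->features, mask);
	return all;
}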
  5434. static struct hlist_head *netdev_create_hash(void)
  5435. {
  5436. int i;
  5437. struct hlist_head *hash;
  5438. hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
  5439. if (hash != NULL)
  5440. for (i = 0; i < NETDEV_HASHENTRIES; i++)
  5441. INIT_HLIST_HEAD(&hash[i]);
  5442. return hash;
  5443. }
  5444. /* Initialize per network namespace state */
  5445. static int __net_init netdev_init(struct net *net)
  5446. {
  5447. INIT_LIST_HEAD(&net->dev_base_head);
  5448. net->dev_name_head = netdev_create_hash();
  5449. if (net->dev_name_head == NULL)
  5450. goto err_name;
  5451. net->dev_index_head = netdev_create_hash();
  5452. if (net->dev_index_head == NULL)
  5453. goto err_idx;
  5454. return 0;
  5455. err_idx:
  5456. kfree(net->dev_name_head);
  5457. err_name:
  5458. return -ENOMEM;
  5459. }
  5460. /**
  5461. * netdev_drivername - network driver for the device
  5462. * @dev: network device
  5463. *
  5464. * Determine network driver for device.
  5465. */
  5466. const char *netdev_drivername(const struct net_device *dev)
  5467. {
  5468. const struct device_driver *driver;
  5469. const struct device *parent;
  5470. const char *empty = "";
  5471. parent = dev->dev.parent;
  5472. if (!parent)
  5473. return empty;
  5474. driver = parent->driver;
  5475. if (driver && driver->name)
  5476. return driver->name;
  5477. return empty;
  5478. }
  5479. int __netdev_printk(const char *level, const struct net_device *dev,
  5480. struct va_format *vaf)
  5481. {
  5482. int r;
  5483. if (dev && dev->dev.parent)
  5484. r = dev_printk(level, dev->dev.parent, "%s: %pV",
  5485. netdev_name(dev), vaf);
  5486. else if (dev)
  5487. r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
  5488. else
  5489. r = printk("%s(NULL net_device): %pV", level, vaf);
  5490. return r;
  5491. }
  5492. EXPORT_SYMBOL(__netdev_printk);
  5493. int netdev_printk(const char *level, const struct net_device *dev,
  5494. const char *format, ...)
  5495. {
  5496. struct va_format vaf;
  5497. va_list args;
  5498. int r;
  5499. va_start(args, format);
  5500. vaf.fmt = format;
  5501. vaf.va = &args;
  5502. r = __netdev_printk(level, dev, &vaf);
  5503. va_end(args);
  5504. return r;
  5505. }
  5506. EXPORT_SYMBOL(netdev_printk);
  5507. #define define_netdev_printk_level(func, level) \
  5508. int func(const struct net_device *dev, const char *fmt, ...) \
  5509. { \
  5510. int r; \
  5511. struct va_format vaf; \
  5512. va_list args; \
  5513. \
  5514. va_start(args, fmt); \
  5515. \
  5516. vaf.fmt = fmt; \
  5517. vaf.va = &args; \
  5518. \
  5519. r = __netdev_printk(level, dev, &vaf); \
  5520. va_end(args); \
  5521. \
  5522. return r; \
  5523. } \
  5524. EXPORT_SYMBOL(func);
  5525. define_netdev_printk_level(netdev_emerg, KERN_EMERG);
  5526. define_netdev_printk_level(netdev_alert, KERN_ALERT);
  5527. define_netdev_printk_level(netdev_crit, KERN_CRIT);
  5528. define_netdev_printk_level(netdev_err, KERN_ERR);
  5529. define_netdev_printk_level(netdev_warn, KERN_WARNING);
  5530. define_netdev_printk_level(netdev_notice, KERN_NOTICE);
  5531. define_netdev_printk_level(netdev_info, KERN_INFO);
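/*
 * Illustrative sketch (not part of dev.c): the helpers generated above are
 * used like printk() but prefix the message with the driver and device
 * name. The "link is up" wording is just an example message.
 */
static void example_report_link(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_warn(dev, "link is down\n");
}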
  5532. static void __net_exit netdev_exit(struct net *net)
  5533. {
  5534. kfree(net->dev_name_head);
  5535. kfree(net->dev_index_head);
  5536. }
  5537. static struct pernet_operations __net_initdata netdev_net_ops = {
  5538. .init = netdev_init,
  5539. .exit = netdev_exit,
  5540. };
  5541. static void __net_exit default_device_exit(struct net *net)
  5542. {
  5543. struct net_device *dev, *aux;
  5544. /*
  5545. * Push all migratable network devices back to the
  5546. * initial network namespace
  5547. */
  5548. rtnl_lock();
  5549. for_each_netdev_safe(net, dev, aux) {
  5550. int err;
  5551. char fb_name[IFNAMSIZ];
5552. /* Ignore unmovable devices (e.g. loopback) */
  5553. if (dev->features & NETIF_F_NETNS_LOCAL)
  5554. continue;
  5555. /* Leave virtual devices for the generic cleanup */
  5556. if (dev->rtnl_link_ops)
  5557. continue;
  5558. /* Push remaining network devices to init_net */
  5559. snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
  5560. err = dev_change_net_namespace(dev, &init_net, fb_name);
  5561. if (err) {
  5562. printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
  5563. __func__, dev->name, err);
  5564. BUG();
  5565. }
  5566. }
  5567. rtnl_unlock();
  5568. }
  5569. static void __net_exit default_device_exit_batch(struct list_head *net_list)
  5570. {
5571. /* At exit, all network devices must be removed from a network
  5572. * namespace. Do this in the reverse order of registration.
  5573. * Do this across as many network namespaces as possible to
  5574. * improve batching efficiency.
  5575. */
  5576. struct net_device *dev;
  5577. struct net *net;
  5578. LIST_HEAD(dev_kill_list);
  5579. rtnl_lock();
  5580. list_for_each_entry(net, net_list, exit_list) {
  5581. for_each_netdev_reverse(net, dev) {
  5582. if (dev->rtnl_link_ops)
  5583. dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
  5584. else
  5585. unregister_netdevice_queue(dev, &dev_kill_list);
  5586. }
  5587. }
  5588. unregister_netdevice_many(&dev_kill_list);
  5589. list_del(&dev_kill_list);
  5590. rtnl_unlock();
  5591. }
  5592. static struct pernet_operations __net_initdata default_device_ops = {
  5593. .exit = default_device_exit,
  5594. .exit_batch = default_device_exit_batch,
  5595. };
  5596. /*
  5597. * Initialize the DEV module. At boot time this walks the device list and
  5598. * unhooks any devices that fail to initialise (normally hardware not
  5599. * present) and leaves us with a valid list of present and active devices.
  5600. *
  5601. */
  5602. /*
  5603. * This is called single threaded during boot, so no need
  5604. * to take the rtnl semaphore.
  5605. */
  5606. static int __init net_dev_init(void)
  5607. {
  5608. int i, rc = -ENOMEM;
  5609. BUG_ON(!dev_boot_phase);
  5610. if (dev_proc_init())
  5611. goto out;
  5612. if (netdev_kobject_init())
  5613. goto out;
  5614. INIT_LIST_HEAD(&ptype_all);
  5615. for (i = 0; i < PTYPE_HASH_SIZE; i++)
  5616. INIT_LIST_HEAD(&ptype_base[i]);
  5617. if (register_pernet_subsys(&netdev_net_ops))
  5618. goto out;
  5619. /*
  5620. * Initialise the packet receive queues.
  5621. */
  5622. for_each_possible_cpu(i) {
  5623. struct softnet_data *sd = &per_cpu(softnet_data, i);
  5624. memset(sd, 0, sizeof(*sd));
  5625. skb_queue_head_init(&sd->input_pkt_queue);
  5626. skb_queue_head_init(&sd->process_queue);
  5627. sd->completion_queue = NULL;
  5628. INIT_LIST_HEAD(&sd->poll_list);
  5629. sd->output_queue = NULL;
  5630. sd->output_queue_tailp = &sd->output_queue;
  5631. #ifdef CONFIG_RPS
  5632. sd->csd.func = rps_trigger_softirq;
  5633. sd->csd.info = sd;
  5634. sd->csd.flags = 0;
  5635. sd->cpu = i;
  5636. #endif
  5637. sd->backlog.poll = process_backlog;
  5638. sd->backlog.weight = weight_p;
  5639. sd->backlog.gro_list = NULL;
  5640. sd->backlog.gro_count = 0;
  5641. }
  5642. dev_boot_phase = 0;
5643. /* The loopback device is special: if any other network device
5644. * is present in a network namespace, the loopback device must
5645. * be present as well. Since we now dynamically allocate and free
5646. * the loopback device, ensure this invariant is maintained by
5647. * keeping the loopback device the first device on the list of
5648. * network devices. This ensures the loopback device is the
5649. * first device that appears and the last network device
5650. * that disappears.
  5651. */
  5652. if (register_pernet_device(&loopback_net_ops))
  5653. goto out;
  5654. if (register_pernet_device(&default_device_ops))
  5655. goto out;
  5656. open_softirq(NET_TX_SOFTIRQ, net_tx_action);
  5657. open_softirq(NET_RX_SOFTIRQ, net_rx_action);
  5658. hotcpu_notifier(dev_cpu_callback, 0);
  5659. dst_init();
  5660. dev_mcast_init();
  5661. rc = 0;
  5662. out:
  5663. return rc;
  5664. }
  5665. subsys_initcall(net_dev_init);
  5666. static int __init initialize_hashrnd(void)
  5667. {
  5668. get_random_bytes(&hashrnd, sizeof(hashrnd));
  5669. return 0;
  5670. }
  5671. late_initcall_sync(initialize_hashrnd);