igb_main.c

/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 4
#define BUILD 7
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
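/* DRV_VERSION stringifies the numbers above at compile time; with MAJ 3,
 * MIN 4 and BUILD 7 it expands to the version string "3.4.7-k".
 */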
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN   |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15      0
	 */
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng ntw timestamp        "
			"bi->skb\n");
		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
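/* Q_IDX_82576(i) walks queue indices in the same low/high pairing the VFs
 * use (VF n owns queues n and n + 8): Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8,
 * Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, ...  Offset by the number of
 * allocated VFs, this lets the PF consume the remaining queues in sequence
 * without colliding with any VF (see igb_cache_ring_register() below).
 */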
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
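		/* Deliberate fall through: any queues not consumed above are
		 * mapped 1:1 after the VF queue region, just as on the other
		 * MAC types below.
		 */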
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
		/* On i350, loopback VLAN packets have the tag byte-swapped. */
		if (adapter->hw.mac.type == e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);
	return -ENOMEM;
}
/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1

static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector. */
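		/* For example, a vector serving rx_queue 1 and tx_queue 1
		 * ends up with msixbm = (E1000_EICR_RX_QUEUE0 << 1) |
		 * (E1000_EICR_TX_QUEUE0 << 1), i.e. the RX queue 1 and
		 * TX queue 1 cause bits. */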
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
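		/* e.g. rx_queue 9 lands in row 1 (9 & 0x7) at column
		 * offset 16 ((9 & 0x8) << 1). */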
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
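		/* e.g. rx_queue 9 lands in row 4 (9 >> 1) at column
		 * offset 16 ((9 & 0x1) << 4). */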
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}
/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has no Rx queues, Tx queues,
 * or MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
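	/* e.g. 4 Rx and 4 Tx queues without queue pairing need 8 queue
	 * vectors plus this one, so numvecs ends up at 9; with pairing
	 * it would be 5 */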
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;
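	/* With one vector per ring, each Rx and each Tx ring gets its own
	 * vector below; otherwise Rx ring i and Tx ring i share vector i,
	 * and any leftover Tx rings get vectors of their own. */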
	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;
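	/* Interrupt setup degrades gracefully: MSI-X is tried first, then
	 * MSI with a single queue pair, and finally legacy shared INTx. */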
	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
	igb_reset_phy(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_disable(&(adapter->q_vector[i]->napi));

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;
	/* Repartition PBA for jumbo frames (MTU greater than 9k).
	 * CTRL.RST is required for the new partitioning to take effect.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
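		/* e.g. a 1522-byte max frame gives (1522 + 16 - 4) * 2 = 3068
		 * bytes, rounded up to 3072 and expressed as 3 KB */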
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));
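	/* e.g. with the 34 KB Rx PBA and 1522-byte frames this is
	 * min(34816 * 9 / 10, 34816 - 3044) = 31334 bytes, before the
	 * 16-byte mask below */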
	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	/*
	 * Flow control settings reset on hardware reset, so guarantee flow
	 * control is off when forcing speed.
	 */
	if (!hw->mac.autoneg)
		igb_force_mac_fc(hw);

	igb_init_dmac(adapter, pba);
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static netdev_features_t igb_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int igb_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (changed & NETIF_F_HW_VLAN_RX)
		igb_vlan_mode(netdev, features);

	if (!(changed & NETIF_F_RXALL))
		return 0;

	netdev->features = features;

	if (netif_running(netdev))
		igb_reinit_locked(adapter);
	else
		igb_reset(adapter);

	return 0;
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
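	/* Prefer a 64-bit DMA mask; if the platform cannot provide one,
	 * fall back to 32-bit addressing before giving up. */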
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	/*
	 * features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_IPV6_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_VLAN_RX |
			    NETIF_F_HW_VLAN_TX;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;
	netdev->hw_features |= NETIF_F_RXALL;

	/* set this bit last since it cannot be part of hw_features */
	netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);
	/* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
#ifdef CONFIG_IGB_PTP
	/* do hw tstamp init after resetting */
	igb_ptp_init(adapter);

#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		   "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}

	pm_runtime_put_noidle(&pdev->dev);
	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pm_runtime_get_noresume(&pdev->dev);
#ifdef CONFIG_IGB_PTP
	igb_ptp_remove(adapter);

#endif
	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		if (!igb_check_vf_assignment(adapter)) {
			pci_disable_sriov(pdev);
			msleep(500);
		} else {
			dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
		}

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(adapter->shadow_vfta);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is because it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;
	int old_vfs = igb_find_enabled_vfs(adapter);
	int i;

	if (old_vfs) {
		dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
			 "max_vfs setting of %d\n", old_vfs, max_vfs);
		adapter->vfs_allocated_count = old_vfs;
	}

	if (!adapter->vfs_allocated_count)
		return;

	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
				sizeof(struct vf_data_storage), GFP_KERNEL);

	/* if allocation failed then we do not support SR-IOV */
	if (!adapter->vf_data) {
		adapter->vfs_allocated_count = 0;
		dev_err(&pdev->dev, "Unable to allocate memory for VF "
			"Data Storage\n");
		goto out;
	}

	if (!old_vfs) {
		if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
			goto err_out;
	}
	dev_info(&pdev->dev, "%d VFs allocated\n",
		 adapter->vfs_allocated_count);
	for (i = 0; i < adapter->vfs_allocated_count; i++)
		igb_vf_configure(adapter, i);

	/* DMA Coalescing is not supported in IOV mode. */
	adapter->flags &= ~IGB_FLAG_DMAC;
	goto out;
err_out:
	kfree(adapter->vf_data);
	adapter->vf_data = NULL;
	adapter->vfs_allocated_count = 0;
out:
	return;
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->node = -1;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
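	/* e.g. 8 RSS queues paired consume 8 queue vectors plus one for
	 * link, instead of the 17 an unpaired layout would need */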
	/* Setup and initialize a copy of the hw vlan table array */
	adapter->shadow_vfta = kzalloc(sizeof(u32) *
				E1000_VLAN_FILTER_TBL_SIZE,
				GFP_ATOMIC);

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type == e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int __igb_open(struct net_device *netdev, bool resuming)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.  */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	if (!resuming)
		pm_runtime_put(&pdev->dev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);

	return err;
}

static int igb_open(struct net_device *netdev)
{
	return __igb_open(netdev, false);
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int __igb_close(struct net_device *netdev, bool suspending)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igb_down(adapter);
	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);
	return 0;
}

static int igb_close(struct net_device *netdev)
{
	return __igb_close(netdev, false);
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size;

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
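	/* Temporarily point the device at the ring's preferred NUMA node so
	 * dma_alloc_coherent() allocates there, then restore the original
	 * node; if the node-local attempt fails, retry on the default node */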
  2193. set_dev_node(dev, tx_ring->numa_node);
  2194. tx_ring->desc = dma_alloc_coherent(dev,
  2195. tx_ring->size,
  2196. &tx_ring->dma,
  2197. GFP_KERNEL);
  2198. set_dev_node(dev, orig_node);
  2199. if (!tx_ring->desc)
  2200. tx_ring->desc = dma_alloc_coherent(dev,
  2201. tx_ring->size,
  2202. &tx_ring->dma,
  2203. GFP_KERNEL);
  2204. if (!tx_ring->desc)
  2205. goto err;
  2206. tx_ring->next_to_use = 0;
  2207. tx_ring->next_to_clean = 0;
  2208. return 0;
  2209. err:
  2210. vfree(tx_ring->tx_buffer_info);
  2211. dev_err(dev,
  2212. "Unable to allocate memory for the transmit descriptor ring\n");
  2213. return -ENOMEM;
  2214. }
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);

	netdev_tx_reset_queue(txring_txq(ring));
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size, desc_len;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, rx_ring->numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev,
						   rx_ring->size,
						   &rx_ring->dma,
						   GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8 bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}
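
	/*
	 * Worked example of the seed packing above (values taken from
	 * rsshash): for j = 0 the bytes 0x6d, 0x5a, 0x56, 0xda are packed
	 * little-endian into one register write, so RSSRK[0] receives
	 * 0xda565a6d; j = 1 packs 0x25, 0x5b, 0x0e, 0xc2 into 0xc20e5b25,
	 * and so on for all ten 32-bit RSSRK registers (40 key bytes).
	 */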
	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			/* fall through */
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}
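
	/*
	 * Worked example of the redirection table fill above: on an 82576
	 * with VFs enabled (shift = 3, num_rx_queues = 2) the entries
	 * alternate between (0 % 2) << 3 = 0x00 and (1 % 2) << 3 = 0x08,
	 * so each RETA register is written as 0x08000800 on a little-endian
	 * host.  The shift positions the RSS queue number within each
	 * pool's block of queues.
	 */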
	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC.  It's unlikely this will break BMC
	 * redirection as it did with e1000.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in igb_set_rx_mode */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	wr32(E1000_RCTL, rctl);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF, check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/*
		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer, so if we are older
	 * than that we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}
/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
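
	/*
	 * Sketch of the BSIZEPKT math above, assuming 4K pages and the
	 * usual 10-bit E1000_SRRCTL_BSIZEPKT_SHIFT (the field is expressed
	 * in 1 KB units): PAGE_SIZE / 2 = 2048 bytes, and 2048 >> 10 = 2,
	 * so the packet buffer size field is programmed to 2, i.e. half a
	 * page per receive buffer.
	 */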
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
				    struct igb_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (tx_buffer->dma)
			dma_unmap_single(ring->dev,
					 tx_buffer->dma,
					 tx_buffer->length,
					 DMA_TO_DEVICE);
	} else if (tx_buffer->dma) {
		dma_unmap_page(ring->dev,
			       tx_buffer->dma,
			       tx_buffer->length,
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	tx_buffer->dma = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}
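
/*
 * Illustrative reading of the WVBR bitmap handled above, given the
 * staggered layout implied by IGB_STAGGERED_QUEUE_OFFSET: a spoof event
 * for VF 2 may be reported in bit 2 or in bit 2 + 8 = 10, so a raw value
 * of 0x0400 (bit 10 set) warns for VF 2 and then clears bits 2 and 10
 * (mask ~0x0404) so the event is only logged once.
 */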
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  link_active will stay
	 * false until e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
			       "Duplex, Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full" : "Half",
			       (ctrl & E1000_CTRL_TFCE) &&
			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
			       (ctrl & E1000_CTRL_RFCE) ? "RX" :
			       (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_LINK_THROTTLE)) {
				netdev_info(netdev, "The network adapter link "
					    "speed was downshifted because it "
					    "overheated\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was "
					   "stopped because it overheated\n");
			}

			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}
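
/*
 * Worked example of the sizing heuristic above (illustrative traffic
 * numbers, not measurements): a ring that saw 1000 Rx packets totalling
 * 600000 bytes has avg_wire_size = 600; adding the 24 bytes of CRC,
 * preamble and inter-frame gap gives 624.  That falls in the mid-size
 * (300..1200) band, so new_val = 624 / 3 = 208.  Outside that band the
 * divisor is 2, e.g. a 1514-byte average gives (1514 + 24) / 2 = 769.
 */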
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				itrval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				itrval = bulk_latency;
			} else if ((packets > 35)) {
				itrval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}
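
/*
 * Worked example of the transitions above (illustrative numbers): from
 * low_latency, an interval of 6 packets / 54000 bytes (a TSO burst,
 * bytes/packets = 9000 > 8000) demotes the ring to bulk_latency, while
 * 40 packets / 48000 bytes (bytes/packets = 1200, packets > 35)
 * promotes it to lowest_latency.  igb_set_itr() below then maps the
 * winning state to the 4K, 20K or 70K ints/sec target.
 */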
static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
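
/*
 * Sketch of how the ITR constants used above relate to interrupt rates,
 * assuming the ~0.25 usec per unit granularity these values imply
 * (constants defined in igb.h): IGB_4K_ITR (980) gives an interval of
 * roughly 980 * 0.25 = 245 usec, i.e. about 4000 ints/sec; IGB_20K_ITR
 * (196) about 49 usec / 20000 ints/sec; and IGB_70K_ITR (56) about
 * 14 usec / 70000 ints/sec.
 */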
static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
			    u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IGB_TX_FLAGS_CSUM;
	}

	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}

static __le32 igb_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
				      E1000_ADVTXD_DCMD_IFCS |
				      E1000_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);

	/* set timestamp bit if present */
	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);

	/* set segmentation bits for TSO */
	if (tx_flags & IGB_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring if any offload is enabled */
	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert IPv4 checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
	}

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

/*
 * The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR	15
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
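
/*
 * Worked example of the limit above: IGB_MAX_DATA_PER_TXD is
 * 1 << 15 = 32768 bytes, so a 64 KB linear buffer is emitted as two
 * full 32K data descriptors, and a 40000-byte buffer as one 32K
 * descriptor followed by one of 40000 - 32768 = 7232 bytes (see the
 * inner while loop in igb_tx_map() below).
 */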
static void igb_tx_map(struct igb_ring *tx_ring,
		       struct igb_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer_info;
	union e1000_adv_tx_desc *tx_desc;
	dma_addr_t dma;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	__le32 cmd_type;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
	cmd_type = igb_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	first->length = size;
	first->dma = dma;
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.olinfo_status = 0;
			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_buffer_info->length = size;
		tx_buffer_info->dma = dma;

		tx_desc->read.olinfo_status = 0;
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		frag++;
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
	if (unlikely(skb->no_fcs))
		cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
		if (tx_buffer_info == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
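
/*
 * Worked example of the "+ 4" descriptor budget used by the caller
 * below: a TSO skb with 3 page fragments needs 1 context descriptor,
 * 1 descriptor for skb->data, 3 for the fragments, plus the 2-slot
 * gap that keeps tail from touching head -- nr_frags + 4 = 7 free
 * descriptors before the frame is accepted.
 */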
  3680. netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
  3681. struct igb_ring *tx_ring)
  3682. {
  3683. struct igb_tx_buffer *first;
  3684. int tso;
  3685. u32 tx_flags = 0;
  3686. __be16 protocol = vlan_get_protocol(skb);
  3687. u8 hdr_len = 0;
  3688. /* need: 1 descriptor per page,
  3689. * + 2 desc gap to keep tail from touching head,
  3690. * + 1 desc for skb->data,
  3691. * + 1 desc for context descriptor,
  3692. * otherwise try next time */
  3693. if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
  3694. /* this is a hard error */
  3695. return NETDEV_TX_BUSY;
  3696. }
  3697. /* record the location of the first descriptor for this packet */
  3698. first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
  3699. first->skb = skb;
  3700. first->bytecount = skb->len;
  3701. first->gso_segs = 1;
  3702. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
  3703. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  3704. tx_flags |= IGB_TX_FLAGS_TSTAMP;
  3705. }
  3706. if (vlan_tx_tag_present(skb)) {
  3707. tx_flags |= IGB_TX_FLAGS_VLAN;
  3708. tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
  3709. }
  3710. /* record initial flags and protocol */
  3711. first->tx_flags = tx_flags;
  3712. first->protocol = protocol;
  3713. tso = igb_tso(tx_ring, first, &hdr_len);
  3714. if (tso < 0)
  3715. goto out_drop;
  3716. else if (!tso)
  3717. igb_tx_csum(tx_ring, first);
  3718. igb_tx_map(tx_ring, first, hdr_len);
  3719. /* Make sure there is space in the ring for the next send. */
  3720. igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
  3721. return NETDEV_TX_OK;
  3722. out_drop:
  3723. igb_unmap_and_free_tx_resource(tx_ring, first);
  3724. return NETDEV_TX_OK;
  3725. }
static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

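/**
 * igb_xmit_frame - ndo_start_xmit entry point
 * @skb: packet to transmit
 * @netdev: network interface device structure
 *
 * Drops packets while the interface is down, pads runt frames up to
 * the 17-byte hardware minimum, and dispatches to the ring selected
 * by the skb's queue mapping.
 **/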
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

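/**
 * igb_msix_other - MSI-X handler for causes not tied to a queue vector
 * @irq: interrupt number
 * @data: pointer to our adapter structure
 *
 * Handles device reset requests, DMA out-of-sync events (which may
 * indicate VF spoofing in IOV mode), VF mailbox traffic and link
 * status changes.
 **/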
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

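/**
 * igb_write_itr - push a pending interrupt throttle rate to the EITR register
 * @q_vector: vector whose ITR value should be written
 *
 * A zero value would disable throttling, so it is clamped to a small
 * minimum; 82575 parts expect the rate replicated in the upper half of
 * the register, later parts take a counter-ignore bit instead.
 **/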
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
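/**
 * igb_update_dca - retarget DCA descriptor writes at the current CPU
 * @q_vector: vector that is running on the current CPU
 *
 * Does nothing if the vector last ran on this CPU; otherwise rewrites
 * the per-queue Tx/Rx DCA control registers with this CPU's tag.
 **/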
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring) {
		int q = q_vector->tx.ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx.ring) {
		int q = q_vector->rx.ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
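/**
 * igb_vf_configure - assign a MAC address and PCI device to one VF
 * @adapter: board private structure
 * @vf: VF to configure
 *
 * Generates a random MAC for the VF, then searches the PCI device list
 * for the matching VF function (devfn stride 2 on 82576, 4 on i350).
 **/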
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pvfdev;
	unsigned int device_id;
	u16 thisvf_devfn;

	random_ether_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
			(pdev->devfn & 1);
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
				(pdev->devfn & 3);
		break;
	default:
		device_id = 0;
		thisvf_devfn = 0;
		break;
	}

	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == thisvf_devfn)
			break;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	if (pvfdev)
		adapter->vf_data[vf].vfdev = pvfdev;
	else
		dev_err(&pdev->dev,
			"Couldn't find pci dev ptr for VF %4.4x\n",
			thisvf_devfn);
	return pvfdev != NULL;
}

static int igb_find_enabled_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pvfdev;
	u16 vf_devfn = 0;
	u16 vf_stride;
	unsigned int device_id;
	int vfs_found = 0;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		vf_stride = 2;
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		vf_stride = 4;
		break;
	default:
		device_id = 0;
		vf_stride = 0;
		break;
	}

	vf_devfn = pdev->devfn + 0x80;
	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == vf_devfn &&
		    (pvfdev->bus->number >= pdev->bus->number))
			vfs_found++;
		vf_devfn += vf_stride;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	return vfs_found;
}

static int igb_check_vf_assignment(struct igb_adapter *adapter)
{
	int i;
	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (adapter->vf_data[i].vfdev) {
			if (adapter->vf_data[i].vfdev->dev_flags &
			    PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}
	}
	return false;
}
#endif

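/**
 * igb_ping_all_vfs - notify all VFs over the mailbox that the PF is up
 * @adapter: board private structure
 **/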
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;
		/* if pool is empty then remove entry from vfta; read the
		 * vid out of the entry before clearing it */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}
		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

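/**
 * igb_set_vmvir - set or clear default VLAN insertion for a VF
 * @adapter: board private structure
 * @vid: VLAN tag to insert on transmit, or 0 to disable insertion
 * @vf: VF number
 **/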
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

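/**
 * igb_vf_reset_msg - handle a reset request from a VF
 * @adapter: board private structure
 * @vf: VF that requested the reset
 *
 * Performs the common reset work, programs the VF MAC into a RAR
 * entry, enables Tx/Rx for the VF and replies with an ACK carrying
 * the MAC address.
 **/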
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set MAC address\nReload the VF driver to "
				 "resume operations\n", vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set VLAN tag\nReload the VF driver to "
				 "resume operations\n", vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

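/**
 * igb_ring_irq_enable - update ITR and re-arm the interrupt for a vector
 * @q_vector: vector whose interrupt should be re-enabled after polling
 **/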
static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If not enough Rx work done, exit the polling mode */
	napi_complete(napi);
	igb_ring_irq_enable(q_vector);

	return 0;
}

#ifdef CONFIG_IGB_PTP
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_tx_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
			    struct igb_tx_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}
#endif

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	for (; budget; budget--) {
		eop_desc = tx_buffer->next_to_watch;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

#ifdef CONFIG_IGB_PTP
		/* retrieve hardware timestamp */
		igb_tx_hwtstamp(q_vector, tx_buffer);
#endif

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);
		tx_buffer->skb = NULL;

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 tx_buffer->dma,
				 tx_buffer->length,
				 DMA_TO_DEVICE);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer->dma = 0;

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (tx_buffer->dma) {
				dma_unmap_page(tx_ring->dev,
					       tx_buffer->dma,
					       tx_buffer->length,
					       DMA_TO_DEVICE);
			}
		}

		/* clear last DMA location */
		tx_buffer->dma = 0;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		eop_desc = tx_buffer->next_to_watch;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (eop_desc &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				eop_desc,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

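/**
 * igb_rx_checksum - set the skb checksum state from the Rx descriptor
 * @ring: ring the descriptor was received on
 * @rx_desc: advanced Rx descriptor to inspect
 * @skb: packet currently being processed
 **/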
static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

#ifdef CONFIG_IGB_PTP
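/**
 * igb_rx_hwtstamp - attach a hardware Rx timestamp to an skb
 * @q_vector: vector that received the packet
 * @rx_desc: advanced Rx descriptor
 * @skb: packet the timestamp belongs to
 **/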
static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
			    union e1000_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
				       E1000_RXDADV_STAT_TS))
		return;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
#endif

static void igb_rx_vlan(struct igb_ring *ring,
			union e1000_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
		u16 vid;
		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, vid);
	}
}

static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IGB_RX_HDR_LEN)
		hlen = IGB_RX_HDR_LEN;
	return hlen;
}

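/**
 * igb_clean_rx_irq - clean completed descriptors from an Rx ring
 * @q_vector: vector owning the ring
 * @budget: NAPI budget, the maximum number of packets to process
 *
 * Returns true if the ring was fully cleaned within budget.
 **/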
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx.ring;
	union e1000_adv_rx_desc *rx_desc;
	const int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	u16 i = rx_ring->next_to_clean;

	rx_desc = IGB_RX_DESC(rx_ring, i);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		struct sk_buff *skb = buffer_info->skb;
		union e1000_adv_rx_desc *next_rxd;

		buffer_info->skb = NULL;
		prefetch(skb->data);

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IGB_RX_DESC(rx_ring, i);
		prefetch(next_rxd);

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		if (!skb_is_nonlinear(skb)) {
			__skb_put(skb, igb_get_hlen(rx_desc));
			dma_unmap_single(rx_ring->dev, buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (rx_desc->wb.upper.length) {
			u16 length = le16_to_cpu(rx_desc->wb.upper.length);

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}

		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
			struct igb_rx_buffer *next_buffer;
			next_buffer = &rx_ring->rx_buffer_info[i];
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (unlikely((igb_test_staterr(rx_desc,
					       E1000_RXDEXT_ERR_FRAME_ERR_MASK))
			     && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

#ifdef CONFIG_IGB_PTP
		igb_rx_hwtstamp(q_vector, rx_desc, skb);
#endif
		igb_rx_hash(rx_ring, rx_desc, skb);
		igb_rx_checksum(rx_ring, rx_desc, skb);
		igb_rx_vlan(rx_ring, rx_desc, skb);

		total_bytes += skb->len;
		total_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		napi_gro_receive(&q_vector->napi, skb);

		budget--;
next_desc:
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
	}

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return !!budget;
}

static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
				 struct igb_rx_buffer *bi)
{
	struct sk_buff *skb = bi->skb;
	dma_addr_t dma = bi->dma;

	if (dma)
		return true;

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IGB_RX_HDR_LEN);
		bi->skb = skb;
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}

		/* initialize skb for ring */
		skb_record_rx_queue(skb, rx_ring->queue_index);
	}

	dma = dma_map_single(rx_ring->dev, skb->data,
			     IGB_RX_HDR_LEN, DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	return true;
}

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t page_dma = bi->page_dma;
	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);

	if (page_dma)
		return true;

	if (!page) {
		page = alloc_page(GFP_ATOMIC | __GFP_COLD);
		bi->page = page;
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}
	}

	page_dma = dma_map_page(rx_ring->dev, page,
				page_offset, PAGE_SIZE / 2,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, page_dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->page_dma = page_dma;
	bi->page_offset = page_offset;
	return true;
}

/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to allocate buffers on
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	while (cleaned_count--) {
		if (!igb_alloc_mapped_skb(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info. */
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;
	}

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - handle MII related ioctls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
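
/* Usage sketch (illustrative, not part of the driver): from userspace the
 * PHY can be queried through these ioctls roughly as follows, assuming an
 * open socket fd and interface name "eth0":
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;	// basic mode status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// result in mii->val_out
 *
 * Note that SIOCSMIIREG (register writes) is deliberately rejected above.
 */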
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * layer 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 |          /* enable timestamping */
		      ETH_P_1588));              /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319 /* UDP destination port for PTP event messages */
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
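
/* Usage sketch (illustrative only): a PTP daemon would typically enable
 * hardware time stamping like this, assuming an open socket fd and
 * interface "eth0":
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter reports what was actually enabled, which may be
 * broader than requested (see the HWTSTAMP_FILTER_ALL fallback above).
 */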
/**
 * igb_ioctl - dispatch device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);
}
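
/* The next two callbacks maintain the hardware VLAN filters at two levels:
 * the VLVF array steers a VLAN to a pool/VF (relevant with SR-IOV enabled),
 * while the VFTA bitmap admits the VLAN at the port. The PF sets the VFTA
 * entry unconditionally because it can receive VLANs with no VLVF slot.
 */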
static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}
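
/* The switch in igb_set_spd_dplx() below relies on spd + dplx being a
 * unique sum: DUPLEX_HALF/DUPLEX_FULL are 0/1 and the SPEED_* constants
 * are 10, 100 and 1000, so the possible sums 10, 11, 100, 101 and 1001
 * never collide. The guard above the switch enforces exactly that shape.
 */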
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex, so reject
	 * any other combination */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
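
/* Wake-on-LAN summary for the suspend paths above and below: WUFC selects
 * which events may wake the part (link change, magic packet, multicast,
 * etc.), WUC_PME_EN arms PME generation, and runtime suspend deliberately
 * restricts the filter to link-change only (E1000_WUFC_LNKC) so ordinary
 * traffic does not keep resuming the device.
 */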
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
{
	int retval;
	bool wake;
	struct pci_dev *pdev = to_pci_dev(dev);

	retval = __igb_shutdown(pdev, &wake, 0);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
static int igb_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (!rtnl_is_locked()) {
		/*
		 * shut up ASSERT_RTNL() warning in
		 * netif_set_real_num_tx/rx_queues.
		 */
		rtnl_lock();
		err = igb_init_interrupt_scheme(adapter);
		rtnl_unlock();
	} else {
		err = igb_init_interrupt_scheme(adapter);
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netdev->flags & IFF_UP) {
		err = __igb_open(netdev, true);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int igb_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake, 1);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_runtime_resume(struct device *dev)
{
	return igb_resume(dev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_q_vector *q_vector;
	int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (adapter->msix_entries)
			wr32(E1000_EIMC, q_vector->eims_value);
		else
			igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
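
/* Worked example of the packing above, using the illustrative address
 * 00:1b:21:aa:bb:cc: bytes 0-3 land in RAL as 0xaa211b00 (byte 0 in the
 * least significant position) and bytes 4-5 in the low half of RAH as
 * 0x0000ccbb, before the valid bit and pool selection are ORed in.
 */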
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and move
	 * towards the first, as a result a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}
static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}
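
/* Worked example for the rate factor math above: with link_speed = 1000
 * and tx_rate = 300 (both in Mbps), rf_int = 1000 / 300 = 3 and the
 * remainder 100 becomes the fixed-point fraction
 * rf_dec = 100 * 2^E1000_RTTBCNRC_RF_INT_SHIFT / 300, so hardware applies
 * a rate factor of roughly 3.33 to throttle the VF queue to ~300 Mbps.
 */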
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit "
			 "rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/*
			 * DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
			hwm = 64 * pba - adapter->max_frame_size / 16;
			if (hwm < 64 * (pba - 6))
				hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/*
			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
			dmac_thr = pba - adapter->max_frame_size / 512;
			if (dmac_thr < pba - 10)
				dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = ~1000 usec, in 32 usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/*
			 * no lower threshold to disable
			 * coalescing (smart fifo) - UTHRESH = 0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
			wr32(E1000_DMCTLX, reg);

			/*
			 * free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/*
			 * make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}
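
/* Worked example for the watermark math above, with an illustrative pba
 * of 34 (KB) and max_frame_size of 1522: hwm = 64 * 34 - 1522 / 16
 * = 2176 - 95 = 2081 sixteen-byte units, which is above the floor of
 * 64 * (34 - 6) = 1792 and is therefore used as-is.
 */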
/* igb_main.c */