ixgbe_main.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
        [board_82599] = &ixgbe_82599_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
         board_82599 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                if (direction == -1)
                        direction = 0;
                index = (((direction * 64) + queue) >> 2) & 0x1F;
                ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
                ivar &= ~(0xFF << (8 * (queue & 0x3)));
                ivar |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((queue & 1) * 8);
                        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
                        break;
                } else {
                        /* tx or rx causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((16 * (queue & 1)) + (8 * direction));
                        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
                        break;
                }
        default:
                break;
        }
}
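
/*
 * Worked example of the index math above (illustrative only): on 82598 the
 * IVAR entries are 8 bits wide, four per register, with Rx causes occupying
 * entries 0-63 and Tx causes 64-127 (hence direction * 64).  Rx queue 5 maps
 * to index = ((0 * 64 + 5) >> 2) & 0x1F = 1, byte lane 5 & 0x3 = 1, i.e.
 * bits 15:8 of IVAR(1).  On 82599 each register carries two queues, so Tx
 * queue 4 maps to index = 16 * (4 & 1) + 8 * 1 = 8, i.e. bits 15:8 of
 * IVAR(4 >> 1) = IVAR(2).
 */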

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
        u32 mask;

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
        } else {
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
        }
}
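
/*
 * Worked example of the qmask split above (illustrative only): the 82599
 * exposes up to 64 queue interrupt causes across two 32-bit EICS registers,
 * so rearming vector 33 (qmask bit 33) sets bit 1 of EICS_EX(1), while
 * vector 3 sets bit 3 of EICS_EX(0).
 */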

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        tx_buffer_info->dma = 0;
        if (tx_buffer_info->skb) {
                skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
                              DMA_TO_DEVICE);
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        tx_buffer_info->time_stamp = 0;
        /* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_is_paused - check if the tx ring is paused
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF; otherwise, finds the TC that
 * corresponds to this tx_ring and checks that TC's XOFF bit in TFCS.
 *
 * Returns : true if paused
 */
static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
                                      struct ixgbe_ring *tx_ring)
{
        u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                int tc;
                int reg_idx = tx_ring->reg_idx;
                int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        tc = reg_idx >> 2;
                        txoff = IXGBE_TFCS_TXOFF0;
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        tc = 0;
                        txoff = IXGBE_TFCS_TXOFF;
                        if (dcb_i == 8) {
                                /* TC0, TC1 */
                                tc = reg_idx >> 5;
                                if (tc == 2) /* TC2, TC3 */
                                        tc += (reg_idx - 64) >> 4;
                                else if (tc == 3) /* TC4, TC5, TC6, TC7 */
                                        tc += 1 + ((reg_idx - 96) >> 3);
                        } else if (dcb_i == 4) {
                                /* TC0, TC1 */
                                tc = reg_idx >> 6;
                                if (tc == 1) {
                                        tc += (reg_idx - 64) >> 5;
                                        if (tc == 2) /* TC2, TC3 */
                                                tc += (reg_idx - 96) >> 4;
                                }
                        }
                }
                txoff <<= tc;
        }
#endif
        return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}
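
/*
 * Illustrative TC lookup using the math above, for 82599 in 8-TC DCB mode:
 * reg_idx 70 gives tc = 70 >> 5 = 2, then tc += (70 - 64) >> 4 = 0, so TC2;
 * reg_idx 100 gives tc = 100 >> 5 = 3, then tc += 1 + ((100 - 96) >> 3) = 1,
 * so TC4.  txoff <<= tc then selects the per-TC XOFF bit tested in TFCS.
 */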

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of eop */
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !ixgbe_tx_is_paused(adapter, tx_ring)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  jiffies              <%lx>\n",
                        tx_ring->queue_index,
                        IXGBE_READ_REG(hw, tx_ring->head),
                        IXGBE_READ_REG(hw, tx_ring->tail),
                        tx_ring->next_to_use, eop,
                        tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
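
/*
 * Worked example (illustrative only): with IXGBE_MAX_DATA_PER_TXD = 16384, a
 * 60000 byte buffer needs TXD_USE_COUNT(60000) = (60000 >> 14) +
 * ((60000 & 16383) ? 1 : 0) = 3 + 1 = 4 descriptors.  With 4K pages,
 * DESC_NEEDED works out to 1 (skb->data) + MAX_SKB_FRAGS (one per page-sized
 * frag) + 1 (context descriptor).
 */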

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
               (count < tx_ring->work_limit)) {
                bool cleaned = false;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;

                        if (cleaned && skb) {
                                unsigned int segs, bytecount;
                                unsigned int hlen = skb_headlen(skb);

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
                                /* adjust for FCoE Sequence Offload */
                                if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
                                    && (skb->protocol == htons(ETH_P_FCOE)) &&
                                    skb_is_gso(skb)) {
                                        hlen = skb_transport_offset(skb) +
                                                sizeof(struct fc_frame_header) +
                                                sizeof(struct fcoe_crc_eof);
                                        segs = DIV_ROUND_UP(skb->len - hlen,
                                                skb_shinfo(skb)->gso_size);
                                }
#endif /* IXGBE_FCOE */
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * hlen) + skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);

                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
                        /* schedule immediate reset if we believe we hung */
                        DPRINTK(PROBE, INFO,
                                "tx hang %d detected, resetting adapter\n",
                                adapter->tx_timeout_count + 1);
                        ixgbe_tx_timeout(adapter->netdev);
                }
        }

        /* re-arm the interrupt */
        if (count >= tx_ring->work_limit)
                ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
        return (count < tx_ring->work_limit);
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rx_ring - adapter->rx_ring;

        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                        rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
                        rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                                   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
                }
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
                rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
                            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;
        struct ixgbe_hw *hw = &adapter->hw;

        if (tx_ring->cpu != cpu) {
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                        txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                        txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
                        txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                                   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
                        txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
                }
                tx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].cpu = -1;
                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue)
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct napi_struct *napi = &q_vector->napi;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        skb_record_rx_queue(skb, ring->queue_index);
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
                else
                        napi_gro_receive(napi, skb);
        } else {
                if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
                        netif_rx(skb);
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware receive status
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
        u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

                /*
                 * 82599 errata, UDP frames with a 0 checksum can be marked as
                 * checksum errors.
                 */
                if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
                    (adapter->hw.mac.type == ixgbe_mac_82599EB))
                        return;

                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        unsigned int i;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!bi->page_dma &&
                    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = alloc_page(GFP_ATOMIC);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
                        }

                        bi->page_dma = pci_map_page(pdev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    PCI_DMA_FROMDEVICE);
                }

                if (!bi->skb) {
                        struct sk_buff *skb;
                        skb = netdev_alloc_skb(adapter->netdev,
                                               (rx_ring->rx_buf_len +
                                                NET_IP_ALIGN));

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary
                         * this will result in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data,
                                                 rx_ring->rx_buf_len,
                                                 PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i-- == 0)
                        i = (rx_ring->count - 1);
                ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
        }
}
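
/*
 * Illustrative notes on the refill above: with 4K pages, page_offset
 * alternates between 0 and 2048, so one page backs two half-page receive
 * buffers before it is released.  At no_buffers:, next_to_use points at the
 * first descriptor that was NOT initialized, so i is stepped back by one
 * (wrapping to count - 1) before being written to RDT as the last valid
 * descriptor.
 */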

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
        return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
                IXGBE_RXDADV_RSCCNT_MASK) >>
               IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
        unsigned int frag_list_size = 0;

        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
        }

        skb_shinfo(skb)->frag_list = skb->next;
        skb->next = NULL;
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
        return skb;
}
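
/*
 * Illustrative example of the transform above: for an RSC group received as
 * A, B, C (C carries EOP and is the skb passed in), the chaining done in
 * ixgbe_clean_rx_irq left C->prev = B, B->prev = A and A->next = B,
 * B->next = C.  The loop walks back from C to A clearing ->prev; A->next
 * (the B..C chain) then becomes A's frag_list, and len(B) + len(C) is folded
 * into A's len, data_len and truesize.
 */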

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i, rsc_count = 0;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
        int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         PCI_DMA_FROMDEVICE);
                        rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;

                if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                        rsc_count = ixgbe_get_rsc_count(rx_desc);

                if (rsc_count) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
                        rx_ring->rsc_count += (rsc_count - 1);
                } else {
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }

                if (staterr & IXGBE_RXD_STAT_EOP) {
                        if (skb->prev)
                                skb = ixgbe_transform_rsc_queue(skb);
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
                        if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                                next_buffer->dma = 0;
                        } else {
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, rx_desc, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
                        if (!ddp_bytes)
                                goto next_desc;
                }
#endif /* IXGBE_FCOE */
                ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;

                mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
                        sizeof(struct fc_frame_header) -
                        sizeof(struct fcoe_crc_eof);
                if (mss > 512)
                        mss &= ~511;
                total_rx_bytes += ddp_bytes;
                total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
        }
#endif /* IXGBE_FCOE */

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;

        return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, 0, j, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, 1, j, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                if (q_vector->txr_count && !q_vector->rxr_count)
                        /* tx only */
                        q_vector->eitr = adapter->tx_eitr_param;
                else if (q_vector->rxr_count)
                        /* rx or mixed */
                        q_vector->eitr = adapter->rx_eitr_param;

                ixgbe_write_eitr(q_vector);
        }

        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                               v_idx);
        else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
			   u32 eitr, u8 itr_setting,
			   int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000 / eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */
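	/*
	 * Worked example: at eitr = 8000 ints/s the timeslice is
	 * 1000000/8000 = 125 us, so 50000 bytes received in that
	 * interval gives bytes_perint = 400 bytes/us (~400 MB/s),
	 * which the 100-1249MB/s band above classifies as bulk.
	 */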
	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->tx_itr,
					   tx_ring->total_packets,
					   tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->rx_itr,
					   rx_ring->total_packets,
					   rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90) / 100) + ((new_itr * 10) / 100);
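		/*
		 * e.g. stepping from 8000 to 20000 ints/s only moves
		 * EITR to 8000*90/100 + 20000*10/100 = 9200 on this
		 * pass, damping oscillation between latency classes.
		 */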
		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		schedule_work(&adapter->multispeed_fiber_task);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		schedule_work(&adapter->sfp_config_module_task);
	} else {
		/* Interrupt isn't for us... */
		return;
	}
}
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which is later done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82598EB)
		ixgbe_check_fan_failure(adapter, eicr);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		ixgbe_check_sfp_event(adapter, eicr);

		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int i;
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
			/* Disable transmits before FDIR Re-initialization */
			netif_tx_stop_all_queues(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *tx_ring =
					&adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
						       &tx_ring->reinit_state))
					schedule_work(&adapter->fdir_reinit_task);
			}
		}
	}
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
					   u64 qmask)
{
	u32 mask;
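	/*
	 * 82598 packs every queue cause into the single EIMS register;
	 * later MACs expose 64 queue bits, so the mask is split below
	 * across EIMS_EX(0) (low dword) and EIMS_EX(1) (high dword).
	 */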
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
	}
	/* skip the flush */
}
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
	}
	/* skip the flush */
}
static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int r_idx;
	int i;

	if (!q_vector->txr_count && !q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
		container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
						((u64)1 << q_vector->v_idx));
	}

	return work_done;
}
/**
 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
		container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring = NULL;
	int work_done = 0, i;
	long r_idx;
	bool tx_clean_complete = true;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, ring);
#endif
		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
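	/*
	 * e.g. a 64-packet budget shared by 3 rx rings leaves each ring
	 * 64/3 = 21 packets; the "?: 1" above guards the tx-only case
	 * where rxr_count is zero.
	 */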
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, ring);
#endif
		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
						((u64)1 << q_vector->v_idx));
		return 0;
	}

	return work_done;
}
/**
 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
		container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_tx_dca(adapter, tx_ring);
#endif

	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
		work_done = budget;

	/* If all Tx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->tx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
}
/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
				      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
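	/*
	 * e.g. 10 rx queues on 4 vectors: DIV_ROUND_UP hands out
	 * 3, 3, 2, 2 queues as rxr_remaining and the remaining vector
	 * count shrink together.
	 */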
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
			 &ixgbe_msix_clean_many)
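	/*
	 * SET_HANDLER picks the leanest ISR for each vector: tx-only
	 * vectors get ixgbe_msix_clean_tx, rx-only vectors get
	 * ixgbe_msix_clean_rx, and mixed vectors fall through to
	 * ixgbe_msix_clean_many.
	 */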
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		}

		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
			"request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 adapter->q_vector[i]);
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
					    q_vector->tx_itr,
					    tx_ring->total_packets,
					    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
					    q_vector->rx_itr,
					    rx_ring->total_packets,
					    rx_ring->total_bytes);

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90) / 100) + ((new_itr * 10) / 100);
		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
	}
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	ixgbe_irq_enable_queues(adapter, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata.  Mask the interrupts
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM */
		ixgbe_irq_enable(adapter);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_check_sfp_event(adapter, eicr);

	ixgbe_check_fan_failure(adapter, eicr);

	if (napi_schedule_prep(&(q_vector->napi))) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__napi_schedule(&(q_vector->napi));
	}

	return IRQ_HANDLED;
}
static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
				  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
				  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
				 adapter->q_vector[i]);
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));

	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);
		/*
		 * Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		case ixgbe_mac_82599EB:
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		case ixgbe_mac_82599EB:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}
	}

	if (hw->mac.type == ixgbe_mac_82599EB) {
		u32 rttdcs;

		/* disable the arbiter while setting MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

		/* We enable 8 traffic classes, DCB only */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
			IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
					IXGBE_MTQC_8TC_8TQ));
		else
			IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);

		/* re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring)
{
	u32 srrctl;
	int index;
	struct ixgbe_ring_feature *feature = adapter->ring_feature;

	index = rx_ring->reg_idx;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		unsigned long mask;
		mask = (unsigned long) feature[RING_F_RSS].mask;
		index = index & mask;
	}
	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		  IXGBE_SRRCTL_BSIZEHDR_MASK;

	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
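		/*
		 * BSIZEPKT is presumably expressed in 1 KB units, hence
		 * the ALIGN to 1024 before shifting the byte count
		 * down into the field.
		 */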
		srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
			  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}
static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
	u32 mrqc = 0;
	int mask;

	if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
		return mrqc;

	mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
#ifdef CONFIG_IXGBE_DCB
				 | IXGBE_FLAG_DCB_ENABLED
#endif
				);

	switch (mask) {
	case (IXGBE_FLAG_RSS_ENABLED):
		mrqc = IXGBE_MRQC_RSSEN;
		break;
#ifdef CONFIG_IXGBE_DCB
	case (IXGBE_FLAG_DCB_ENABLED):
		mrqc = IXGBE_MRQC_RT8TCEN;
		break;
#endif /* CONFIG_IXGBE_DCB */
	default:
		break;
	}

	return mrqc;
}
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @index: index of ring to set
 * @rx_buf_len: rx buffer length
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
				   int rx_buf_len)
{
	struct ixgbe_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	int j;
	u32 rscctrl;

	rx_ring = &adapter->rx_ring[index];
	j = rx_ring->reg_idx;
	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65535
	 */
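	/*
	 * e.g. a 4 KB buffer lands in the MAXDESC_8 bucket below,
	 * since coalescing 16 descriptors at 4096 bytes each would
	 * already exceed the 65535 limit.
	 */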
	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#elif (MAX_SKB_FRAGS > 4)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
	} else {
		if (rx_buf_len < IXGBE_RXBUFFER_4096)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
		else if (rx_buf_len < IXGBE_RXBUFFER_8192)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
		else
			rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	}
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
}
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring *rx_ring;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen, rxctrl, rxcsum;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 fctrl, hlreg0;
	u32 reta = 0, mrqc = 0;
	u32 rdrxctl;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
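	/*
	 * Packet split is always enabled here: protocol headers land
	 * in a small host buffer (IXGBE_RX_HDR_SIZE below) while the
	 * payload is posted to half-page buffers.
	 */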
	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		rx_buf_len = IXGBE_RX_HDR_SIZE;
		if (hw->mac.type == ixgbe_mac_82599EB) {
			/* PSRTYPE must be initialized in 82599 */
			u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
				      IXGBE_PSRTYPE_UDPHDR |
				      IXGBE_PSRTYPE_IPV4HDR |
				      IXGBE_PSRTYPE_IPV6HDR |
				      IXGBE_PSRTYPE_L2HDR;
			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
		}
	} else {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
#ifdef IXGBE_FCOE
	if (netdev->features & NETIF_F_FCOE_MTU)
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
#endif
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		rdba = rx_ring->dma;
		j = rx_ring->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
		rx_ring->head = IXGBE_RDH(j);
		rx_ring->tail = IXGBE_RDT(j);
		rx_ring->rx_buf_len = rx_buf_len;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
		else
			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;

#ifdef IXGBE_FCOE
		if (netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((i >= f->mask) && (i < f->mask + f->indices)) {
				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
					rx_ring->rx_buf_len =
						IXGBE_FCOE_JUMBO_FRAME_SIZE;
			}
		}
#endif /* IXGBE_FCOE */
		ixgbe_configure_srrctl(adapter, rx_ring);
	}

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set.  Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	/* Program MRQC for the distribution of queues */
	mrqc = ixgbe_setup_mrqc(adapter);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		/* Fill out redirection table */
		for (i = 0, j = 0; i < 128; i++, j++) {
			if (j == adapter->ring_feature[RING_F_RSS].indices)
				j = 0;
			/* reta = 4-byte sliding window of
			 * 0x00..(indices-1)(indices-1)00..etc. */
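			/*
			 * j * 0x11 repeats the queue nibble (j = 2
			 * gives 0x22), and each group of four entries
			 * is flushed as one 32-bit RETA write below.
			 */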
			reta = (reta << 8) | (j * 0x11);
			if ((i & 3) == 3)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		}

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

		if (hw->mac.type == ixgbe_mac_82598EB)
			mrqc |= IXGBE_MRQC_RSSEN;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in descriptor, enables
		 * RSS hash */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}
	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
		/* Enable IPv4 payload checksum for UDP fragments
		 * if PCSD is not set */
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		/* Enable 82599 HW-RSC */
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_configure_rscctl(adapter, i, rx_buf_len);

		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
				(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
	}
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
}
static void ixgbe_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;
	int i, j;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	/*
	 * For a DCB driver, always enable VLAN tag stripping so we can
	 * still receive traffic from a DCB-enabled host even if we're
	 * not in DCB mode.
	 */
	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		ctrl |= IXGBE_VLNCTRL_VFE;
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i].reg_idx;
			ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
		}
	}
	ixgbe_vlan_rx_add_vid(netdev, 0);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
static void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vlnctrl;
	u8 *addr_list = NULL;
	int addr_count = 0;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = 1;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			fctrl &= ~IXGBE_FCTRL_UPE;
		} else {
			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		}
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		hw->addr_ctrl.user_set_promisc = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* reprogram secondary unicast list */
	hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);

	/* reprogram multicast list */
	addr_count = netdev->mc_count;
	if (addr_count)
		addr_list = netdev->mc_list->dmi_addr;
	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
					ixgbe_addr_list_itr);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		napi = &q_vector->napi;
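		/*
		 * Vectors that ended up purely rx or purely tx are
		 * re-pointed at the cheaper single-purpose pollers;
		 * mixed vectors keep the poll routine they were
		 * registered with.
		 */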
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			if (!q_vector->rxr_count || !q_vector->txr_count) {
				if (q_vector->txr_count == 1)
					napi->poll = &ixgbe_clean_txonly;
				else if (q_vector->rxr_count == 1)
					napi->poll = &ixgbe_clean_rxonly;
			}
		}

		napi_enable(napi);
	}
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}
#ifdef CONFIG_IXGBE_DCB
/*
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl, vlnctrl;
	int i, j;

	ixgbe_dcb_check_config(&adapter->dcb_cfg);
	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);

	/* reconfigure the hardware */
	ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		/* PThresh workaround for Tx hang with DFP enabled. */
		txdctl |= 32;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
	}
	/* Enable VLAN tag insert/strip */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB) {
		vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i].reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
	}
	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
}

#endif
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	ixgbe_set_rx_mode(netdev);

	ixgbe_restore_vlan(adapter);
#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (hw->mac.type == ixgbe_mac_82598EB)
			netif_set_gso_max_size(netdev, 32768);
		else
			netif_set_gso_max_size(netdev, 65536);
		ixgbe_configure_dcb(adapter);
	} else {
		netif_set_gso_max_size(netdev, 65536);
	}
#else
	netif_set_gso_max_size(netdev, 65536);
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_configure_fcoe(adapter);

#endif /* IXGBE_FCOE */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].atr_sample_rate =
				adapter->atr_sample_rate;
		ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
		ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
	}

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
				       (adapter->rx_ring[i].count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_tw_tyco:
	case ixgbe_phy_tw_unknown:
		return true;
	default:
		return false;
	}
}
/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->phy.multispeed_fiber) {
		/*
		 * In multispeed fiber setups, the device may not have
		 * had a physical connection when the driver loaded.
		 * If that's the case, the initial link configuration
		 * couldn't get the MAC into 10G or 1G mode, so we'll
		 * never have a link status change interrupt fire.
		 * We need to try and force an autonegotiation
		 * session, then bring up link.
		 */
		hw->mac.ops.setup_sfp(hw);

		if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
			schedule_work(&adapter->multispeed_fiber_task);
	} else {
		/*
		 * Direct Attach Cu and non-multispeed fiber modules
		 * still need to be configured properly prior to
		 * attempting link.
		 */
		if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
			schedule_work(&adapter->sfp_config_module_task);
	}
}
/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
	u32 autoneg;
	bool negotiation, link_up = false;
	u32 ret = IXGBE_ERR_LINK_SETUP;

	if (hw->mac.ops.check_link)
		ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

	if (ret)
		goto link_cfg_out;

	if (hw->mac.ops.get_link_capabilities)
		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	if (ret)
		goto link_cfg_out;

	if (hw->mac.ops.setup_link)
		ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
link_cfg_out:
	return ret;
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
					      int rxr)
{
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(&adapter->hw,
				   IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
			"not set within the polling period\n", rxr);
	}
	ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
			      (adapter->rx_ring[rxr].count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	int err;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 txdctl, rxdctl, mhadd;
	u32 dmatxctl;
	u32 gpie;

	ixgbe_get_hw_control(adapter);

	if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
	    (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
		} else {
			/* MSI only */
			gpie = 0;
		}
		/* XXX: to interrupt immediately for EICS writes, enable this */
		/* gpie |= IXGBE_GPIE_EIMEN; */
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* Enable fan failure interrupt if media type is copper */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
		gpie |= IXGBE_SDP1_GPIEN;
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	}

	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
		gpie |= IXGBE_SDP1_GPIEN;
		gpie |= IXGBE_SDP2_GPIEN;
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	}

#ifdef IXGBE_FCOE
	/* adjust max frame to be able to do baby jumbo for FCoE */
	if ((netdev->features & NETIF_F_FCOE_MTU) &&
	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;

#endif /* IXGBE_FCOE */
	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
  2392. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
  2393. }
  2394. if (hw->mac.type == ixgbe_mac_82599EB) {
  2395. /* DMATXCTL.EN must be set after all Tx queue config is done */
  2396. dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
  2397. dmatxctl |= IXGBE_DMATXCTL_TE;
  2398. IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
  2399. }
  2400. for (i = 0; i < adapter->num_tx_queues; i++) {
  2401. j = adapter->tx_ring[i].reg_idx;
  2402. txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
  2403. txdctl |= IXGBE_TXDCTL_ENABLE;
  2404. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
  2405. }
  2406. for (i = 0; i < num_rx_rings; i++) {
  2407. j = adapter->rx_ring[i].reg_idx;
  2408. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
  2409. /* enable PTHRESH=32 descriptors (half the internal cache)
  2410. * and HTHRESH=0 descriptors (to minimize latency on fetch),
  2411. * this also removes a pesky rx_no_buffer_count increment */
  2412. rxdctl |= 0x0020;
  2413. rxdctl |= IXGBE_RXDCTL_ENABLE;
  2414. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
  2415. if (hw->mac.type == ixgbe_mac_82599EB)
  2416. ixgbe_rx_desc_queue_enable(adapter, i);
  2417. }
  2418. /* enable all receives */
  2419. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  2420. if (hw->mac.type == ixgbe_mac_82598EB)
  2421. rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
  2422. else
  2423. rxdctl |= IXGBE_RXCTRL_RXEN;
  2424. hw->mac.ops.enable_rx_dma(hw, rxdctl);
  2425. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
  2426. ixgbe_configure_msix(adapter);
  2427. else
  2428. ixgbe_configure_msi_and_legacy(adapter);
  2429. clear_bit(__IXGBE_DOWN, &adapter->state);
  2430. ixgbe_napi_enable_all(adapter);
  2431. /* clear any pending interrupts, may auto mask */
  2432. IXGBE_READ_REG(hw, IXGBE_EICR);
  2433. ixgbe_irq_enable(adapter);
  2434. /*
  2435. * If this adapter has a fan, check to see if we had a failure
  2436. * before we enabled the interrupt.
  2437. */
  2438. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
  2439. u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  2440. if (esdp & IXGBE_ESDP_SDP1)
  2441. DPRINTK(DRV, CRIT,
  2442. "Fan has stopped, replace the adapter\n");
  2443. }
  2444. /*
  2445. * For hot-pluggable SFP+ devices, a new SFP+ module may have
  2446. * arrived before interrupts were enabled but after probe. Such
  2447. * devices wouldn't have their type identified yet. We need to
  2448. * kick off the SFP+ module setup first, then try to bring up link.
  2449. * If we're not hot-pluggable SFP+, we just need to configure link
  2450. * and bring it up.
  2451. */
  2452. if (hw->phy.type == ixgbe_phy_unknown) {
  2453. err = hw->phy.ops.identify(hw);
  2454. if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
  2455. /*
  2456. * Take the device down and schedule the sfp tasklet
  2457. * which will unregister_netdev and log it.
  2458. */
  2459. ixgbe_down(adapter);
  2460. schedule_work(&adapter->sfp_config_module_task);
  2461. return err;
  2462. }
  2463. }
  2464. if (ixgbe_is_sfp(hw)) {
  2465. ixgbe_sfp_link_config(adapter);
  2466. } else {
  2467. err = ixgbe_non_sfp_link_config(hw);
  2468. if (err)
  2469. DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
  2470. }
  2471. for (i = 0; i < adapter->num_tx_queues; i++)
  2472. set_bit(__IXGBE_FDIR_INIT_DONE,
  2473. &(adapter->tx_ring[i].reinit_state));
  2474. /* enable transmits */
  2475. netif_tx_start_all_queues(netdev);
  2476. /* bring the link up in the watchdog, this could race with our first
  2477. * link up interrupt but shouldn't be a problem */
  2478. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  2479. adapter->link_check_timeout = jiffies;
  2480. mod_timer(&adapter->watchdog_timer, jiffies);
  2481. return 0;
  2482. }
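/**
 * ixgbe_reinit_locked - take the interface down and back up, serialized
 * @adapter: board private structure
 *
 * Spins on the __IXGBE_RESETTING bit so that only one reset runs at a
 * time; it sleeps while waiting, so it must be called from process context.
 */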
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);
	ixgbe_down(adapter);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}
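/**
 * ixgbe_up - reconfigure and restart the hardware after a reset
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 */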
int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}
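/**
 * ixgbe_reset - reinitialize the hardware, logging any init_hw errors
 * @adapter: board private structure
 */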
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = hw->mac.ops.init_hw(hw);
	switch (err) {
	case 0:
	case IXGBE_ERR_SFP_NOT_PRESENT:
		break;
	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
		dev_err(&adapter->pdev->dev, "master disable timed out\n");
		break;
	case IXGBE_ERR_EEPROM_VERSION:
		/* We are running on a pre-production device, log a warning */
		dev_warn(&adapter->pdev->dev, "This device is a pre-production "
		         "adapter/LOM.  Please be aware there may be issues "
		         "associated with your hardware.  If you are "
		         "experiencing problems please contact your Intel or "
		         "hardware representative who provided you with this "
		         "hardware.\n");
		break;
	default:
		dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
	}

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			/* walk the chain of skbs linked via skb->prev and
			 * free each one (RSC can chain buffers this way) */
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		if (rx_buffer_info->page_dma) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
		}
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	IXGBE_WRITE_FLUSH(hw);
	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);

	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	del_timer_sync(&adapter->watchdog_timer);
	cancel_work_sync(&adapter->watchdog_task);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
		                (txdctl & ~IXGBE_TXDCTL_ENABLE));
	}
	/* Disable the Tx DMA engine on 82599 */
	if (hw->mac.type == ixgbe_mac_82599EB)
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
		                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
		                 ~IXGBE_DMATXCTL_TE));

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);
	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);

#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
	ixgbe_setup_dca(adapter);
#endif
}
/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used when the adapter runs with legacy or MSI
 * interrupts (a single queue vector), in NAPI mode
 **/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                        container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int tx_clean_complete, work_done = 0;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
	}
#endif

	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
	ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr(adapter);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
	}
	return work_done;
}
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
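/**
 * ixgbe_reset_task - work handler that resets the adapter out of IRQ context
 * @work: work_struct embedded in struct ixgbe_adapter
 */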
static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}
#ifdef CONFIG_IXGBE_DCB
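/**
 * ixgbe_set_dcb_queues - allocate queues when DCB is enabled
 * @adapter: board private structure to initialize
 *
 * Sizes the queue counts from the DCB ring feature; returns false when
 * DCB is disabled so the caller can fall through to the next queue policy.
 */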
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return ret;

	f->mask = 0x7 << 3;
	adapter->num_rx_queues = f->indices;
	adapter->num_tx_queues = f->indices;
	ret = true;

	return ret;
}
#endif
/**
 * ixgbe_set_rss_queues: Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		f->mask = 0xF;
		adapter->num_rx_queues = f->indices;
		adapter->num_tx_queues = f->indices;
		ret = true;
	} else {
		ret = false;
	}

	return ret;
}
/**
 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
 * @adapter: board private structure to initialize
 *
 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
 * to the original CPU that initiated the Tx session.  This runs in addition
 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
 * Rx load across CPUs using RSS.
 *
 **/
static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];

	f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
	f_fdir->mask = 0;

	/* Flow Director must have RSS enabled */
	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
		adapter->num_tx_queues = f_fdir->indices;
		adapter->num_rx_queues = f_fdir->indices;
		ret = true;
	} else {
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	}
	return ret;
}
#ifdef IXGBE_FCOE
/**
 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
 * @adapter: board private structure to initialize
 *
 * FCoE Rx FCRETA can use up to 8 Rx queues for up to 8 different exchanges.
 * The ring feature mask is not used as a mask for FCoE, because FCoE can take
 * any 8 Rx queues out of the maximum; instead, it is used as the index of the
 * first Rx queue used by FCoE.
 *
 **/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

	f->indices = min((int)num_online_cpus(), f->indices);
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		adapter->num_rx_queues = 1;
		adapter->num_tx_queues = 1;
#ifdef CONFIG_IXGBE_DCB
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
			ixgbe_set_dcb_queues(adapter);
		}
#endif
		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
			DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
				ixgbe_set_fdir_queues(adapter);
			else
				ixgbe_set_rss_queues(adapter);
		}
		/* adding FCoE rx rings to the end */
		f->mask = adapter->num_rx_queues;
		adapter->num_rx_queues += f->indices;
		adapter->num_tx_queues += f->indices;

		ret = true;
	}

	return ret;
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	if (ixgbe_set_fcoe_queues(adapter))
		goto done;

#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_queues(adapter))
		goto done;

#endif
	if (ixgbe_set_fdir_queues(adapter))
		goto done;

	if (ixgbe_set_rss_queues(adapter))
		goto done;

	/* fallback to base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

done:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
}
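/**
 * ixgbe_acquire_msix_vectors - reserve MSI-X vectors from the OS
 * @adapter: board private structure
 * @vectors: number of vectors to start the negotiation with
 *
 * pci_enable_msix() either succeeds, fails hard (negative return), or
 * reports how many vectors are actually available (positive return), so
 * retry with the reported count until we succeed or drop below the
 * minimum we can operate with.
 */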
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                                       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 * 4) TCP Timer (optional)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting IRQs.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		                      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = min(vectors,
		                   adapter->max_msix_q_vectors + NON_Q_VECTORS);
	}
}
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		ret = true;
	} else {
		ret = false;
	}

	return ret;
}
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;
	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* the number of queues is assumed to be symmetric */
			for (i = 0; i < dcb_i; i++) {
				adapter->rx_ring[i].reg_idx = i << 3;
				adapter->tx_ring[i].reg_idx = i << 2;
			}
			ret = true;
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			if (dcb_i == 8) {
				/*
				 * Tx TC0 starts at: descriptor queue 0
				 * Tx TC1 starts at: descriptor queue 32
				 * Tx TC2 starts at: descriptor queue 64
				 * Tx TC3 starts at: descriptor queue 80
				 * Tx TC4 starts at: descriptor queue 96
				 * Tx TC5 starts at: descriptor queue 104
				 * Tx TC6 starts at: descriptor queue 112
				 * Tx TC7 starts at: descriptor queue 120
				 *
				 * Rx TC0-TC7 are offset by 16 queues each
				 */
				for (i = 0; i < 3; i++) {
					adapter->tx_ring[i].reg_idx = i << 5;
					adapter->rx_ring[i].reg_idx = i << 4;
				}
				for ( ; i < 5; i++) {
					adapter->tx_ring[i].reg_idx =
					                         ((i + 2) << 4);
					adapter->rx_ring[i].reg_idx = i << 4;
				}
				for ( ; i < dcb_i; i++) {
					adapter->tx_ring[i].reg_idx =
					                         ((i + 8) << 3);
					adapter->rx_ring[i].reg_idx = i << 4;
				}

				ret = true;
			} else if (dcb_i == 4) {
				/*
				 * Tx TC0 starts at: descriptor queue 0
				 * Tx TC1 starts at: descriptor queue 64
				 * Tx TC2 starts at: descriptor queue 96
				 * Tx TC3 starts at: descriptor queue 112
				 *
				 * Rx TC0-TC3 are offset by 32 queues each
				 */
				adapter->tx_ring[0].reg_idx = 0;
				adapter->tx_ring[1].reg_idx = 64;
				adapter->tx_ring[2].reg_idx = 96;
				adapter->tx_ring[3].reg_idx = 112;
				for (i = 0 ; i < dcb_i; i++)
					adapter->rx_ring[i].reg_idx = i << 5;

				ret = true;
			} else {
				ret = false;
			}
		} else {
			ret = false;
		}
	} else {
		ret = false;
	}
	return ret;
}
#endif
/**
 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		ret = true;
	}

	return ret;
}
#ifdef IXGBE_FCOE
/**
 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			struct ixgbe_fcoe *fcoe = &adapter->fcoe;

			ixgbe_cache_ring_dcb(adapter);
			/* find out queues in TC for FCoE */
			fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
			fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
			/*
			 * In 82599, the number of Tx queues for each traffic
			 * class for both 8-TC and 4-TC modes is:
			 * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
			 * 8 TCs:  32  32  16  16   8   8   8   8
			 * 4 TCs:  64  64  32  32
			 * We have max 8 queues for FCoE, where 8 is the
			 * FCoE redirection table size.  If the TC for FCoE
			 * is less than or equal to TC3, we have enough queues
			 * to add a max of 8 queues for FCoE, so we start the
			 * FCoE Tx descriptors from the next one, i.e.,
			 * reg_idx + 1.  If the TC for FCoE is above TC3,
			 * implying 8-TC mode, and we need 8 for FCoE, we have
			 * to take all queues in that traffic class for FCoE.
			 */
			if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
				fcoe_tx_i--;
		}
#endif /* CONFIG_IXGBE_DCB */
		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
				ixgbe_cache_ring_fdir(adapter);
			else
				ixgbe_cache_ring_rss(adapter);

			fcoe_rx_i = f->mask;
			fcoe_tx_i = f->mask;
		}
		for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
			adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
			adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
		}
		ret = true;
	}
	return ret;
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0].reg_idx = 0;
	adapter->tx_ring[0].reg_idx = 0;

#ifdef IXGBE_FCOE
	if (ixgbe_cache_ring_fcoe(adapter))
		return;

#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_fdir(adapter))
		return;

	if (ixgbe_cache_ring_rss(adapter))
		return;
}
/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPUs.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
	               (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the CPU count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
	                                sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	adapter->atr_sample_rate = 0;
	ixgbe_set_num_queues(adapter);

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
		        "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}
/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbe_q_vector *q_vector;
	int napi_vectors;
	int (*poll)(struct napi_struct *, int);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		napi_vectors = adapter->num_rx_queues;
		poll = &ixgbe_clean_rxtx_many;
	} else {
		num_q_vectors = 1;
		napi_vectors = 1;
		poll = &ixgbe_poll;
	}

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = adapter->tx_eitr_param;
		else
			q_vector->eitr = adapter->rx_eitr_param;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_idx, num_q_vectors;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
		adapter->q_vector[q_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
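/**
 * ixgbe_reset_interrupt_capability - undo ixgbe_set_interrupt_capability
 * @adapter: board private structure
 *
 * Disables MSI-X or MSI as appropriate and frees the MSI-X entry table.
 */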
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
		        "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
	        "Tx Queue count = %u\n",
	        (adapter->num_rx_queues > 1) ? "Enabled" :
	        "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_queues:
	ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
/**
 * ixgbe_sfp_timer - timer that kicks off the search for a missing module
 * @data: pointer to our adapter struct
 **/
static void ixgbe_sfp_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/*
	 * Do the actual detection in sfp_task, outside of interrupt
	 * context, because of the delays that SFP+ detection requires
	 */
	schedule_work(&adapter->sfp_task);
}
/**
 * ixgbe_sfp_task - worker thread to find a missing module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
			goto reschedule;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			dev_err(&adapter->pdev->dev, "failed to initialize "
			        "because an unsupported SFP+ module type "
			        "was detected.\n"
			        "Reload the driver after installing a "
			        "supported module.\n");
			unregister_netdev(adapter->netdev);
		} else {
			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
			        hw->phy.sfp_type);
		}
		/* don't need this routine any more */
		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	}
	return;
reschedule:
	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->ring_feature[RING_F_FDIR].indices =
		                                         IXGBE_MAX_FDIR_INDICES;
		adapter->atr_sample_rate = 20;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
		/* Default traffic class to use for FCoE */
		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
#endif /* IXGBE_FCOE */
	}

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
	                   adapter->ring_feature[RING_F_DCB].indices);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->rx_eitr_param = 20000;
	adapter->tx_itr_setting = 1;
	adapter->tx_eitr_param = 10000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
	        "descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
		        "vmalloc allocation failed for the rx buffer info\n");
		goto alloc_failed;
	}
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);

	if (!rx_ring->desc) {
		DPRINTK(PROBE, ERR,
		        "Memory allocation failed for the rx desc ring\n");
		vfree(rx_ring->rx_buffer_info);
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

alloc_failed:
	return -ENOMEM;
}
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
	        netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
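/**
 * ixgbe_resume - restore the device and interface after system sleep
 * @pdev: PCI device being resumed
 */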
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
		       "device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(adapter->netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}

#endif /* CONFIG_PM */
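/**
 * __ixgbe_shutdown - teardown shared by the suspend and shutdown paths
 * @pdev: PCI device being stopped
 * @enable_wake: set true when wake-up filters were armed from adapter->wol
 *
 * Quiesces the interface, programs the wake-up filter (WUFC) registers
 * and leaves the device ready to enter D3.
 */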
  3799. static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
  3800. {
  3801. struct net_device *netdev = pci_get_drvdata(pdev);
  3802. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3803. struct ixgbe_hw *hw = &adapter->hw;
  3804. u32 ctrl, fctrl;
  3805. u32 wufc = adapter->wol;
  3806. #ifdef CONFIG_PM
  3807. int retval = 0;
  3808. #endif
  3809. netif_device_detach(netdev);
  3810. if (netif_running(netdev)) {
  3811. ixgbe_down(adapter);
  3812. ixgbe_free_irq(adapter);
  3813. ixgbe_free_all_tx_resources(adapter);
  3814. ixgbe_free_all_rx_resources(adapter);
  3815. }
  3816. ixgbe_clear_interrupt_scheme(adapter);
  3817. #ifdef CONFIG_PM
  3818. retval = pci_save_state(pdev);
  3819. if (retval)
  3820. return retval;
  3821. #endif
  3822. if (wufc) {
  3823. ixgbe_set_rx_mode(netdev);
  3824. /* turn on all-multi mode if wake on multicast is enabled */
  3825. if (wufc & IXGBE_WUFC_MC) {
  3826. fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  3827. fctrl |= IXGBE_FCTRL_MPE;
  3828. IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
  3829. }
  3830. ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
  3831. ctrl |= IXGBE_CTRL_GIO_DIS;
  3832. IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
  3833. IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
  3834. } else {
  3835. IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
  3836. IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
  3837. }
  3838. if (wufc && hw->mac.type == ixgbe_mac_82599EB)
  3839. pci_wake_from_d3(pdev, true);
  3840. else
  3841. pci_wake_from_d3(pdev, false);
  3842. *enable_wake = !!wufc;
  3843. ixgbe_release_hw_control(adapter);
  3844. pci_disable_device(pdev);
  3845. return 0;
  3846. }
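
/*
 * Wake-up summary for the path above: wufc carries the Wake-on-LAN
 * filter bits configured via ethtool (adapter->wol).  If any filter is
 * set, the filters are programmed into WUFC and PCIe master requests
 * are disabled (CTRL.GIO_DIS) before sleep; PME from D3 is only armed
 * on 82599 here, and *enable_wake tells the caller whether wake is
 * configured at all.
 */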
#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 rsc_count = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++)
			rsc_count += adapter->rx_ring[i].rsc_count;
		adapter->rsc_count = rsc_count;
	}

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		if (hw->mac.type == ixgbe_mac_82599EB) {
			adapter->stats.pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			adapter->stats.pxoffrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			adapter->stats.pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			adapter->stats.pxoffrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
		adapter->stats.pxontxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		adapter->stats.pxofftxc[i] +=
			IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 tmp;
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
		adapter->stats.gorc += (tmp << 32);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
		adapter->stats.gotc += (tmp << 32);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
	} else {
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
	                               adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}
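
/*
 * Note: the hardware statistics registers read above are clear-on-read,
 * which is why each pass accumulates deltas into the 64-bit software
 * counters in adapter->stats instead of storing raw register values.
 * On 82599 the byte counters are 36 bits wide, split between a low
 * 32-bit register and the 4 low bits of the high register (GORCL/GORCH,
 * GOTCL/GOTCH above).
 */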
/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
		                (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		goto watchdog_reschedule;
	}

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= ((u64)1 << i);
	}

	/* Cause software interrupt to ensure rx rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
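
/*
 * The eics mask built above carries one bit per MSI-X queue vector;
 * ixgbe_irq_rearm_queues() writes it back as a software interrupt so
 * that every active vector gets another NAPI poll, even if the hardware
 * never raised a fresh interrupt for a stalled ring.
 */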
/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             multispeed_fiber_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiation;

	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}

/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_config_module_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 err;

	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;

	/* Time for electrical oscillations to settle down */
	msleep(100);

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
		        "an unsupported SFP+ module type was detected.\n"
		        "Reload the driver after installing a supported "
		        "module.\n");
		unregister_netdev(adapter->netdev);
		return;
	}
	hw->mac.ops.setup_sfp(hw);

	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
		/* This will also work for DA Twinax connections */
		schedule_work(&adapter->multispeed_fiber_task);
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}

/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             fdir_reinit_task);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_FDIR_INIT_DONE,
			        &(adapter->tx_ring[i].reinit_state));
	} else {
		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
		        "ignored adding FDIR ATR filters\n");
	}
	/* Done FDIR Re-initialization, enable transmits */
	netif_tx_start_all_queues(adapter->netdev);
}
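
/*
 * Transmits were disabled when the FDIR table-full condition triggered
 * this task (hence the "enable transmits" step above), so the
 * netif_tx_start_all_queues() call is what lets traffic flow again.
 * Setting __IXGBE_FDIR_INIT_DONE per ring re-arms ATR sampling in the
 * transmit path (see ixgbe_xmit_frame() below).
 */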
/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	int i;
	struct ixgbe_ring *tx_ring;
	int some_tx_pending = 0;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up) {
#ifdef CONFIG_DCB
			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
					hw->mac.ops.fc_enable(hw, i);
			} else {
				hw->mac.ops.fc_enable(hw, 0);
			}
#else
			hw->mac.ops.fc_enable(hw, 0);
#endif
		}

		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
		                         IXGBE_TRY_LINK_TIMEOUT))) {
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			bool flow_rx, flow_tx;

			if (hw->mac.type == ixgbe_mac_82599EB) {
				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
			} else {
				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
				flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
			}

			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
			        "10 Gbps" :
			        (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
			         "1 Gbps" : "unknown speed")),
			       ((flow_rx && flow_tx) ? "RX/TX" :
			        (flow_rx ? "RX" :
			         (flow_tx ? "TX" : "None"))));

			netif_carrier_on(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
		}
	}

	if (!netif_carrier_ok(netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			tx_ring = &adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->reset_task);
		}
	}

	ixgbe_update_stats(adapter);
	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
			                                         iph->daddr, 0,
			                                         IPPROTO_TCP,
			                                         0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			                     &ipv6_hdr(skb)->daddr,
			                     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
		                   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
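
/*
 * Return convention: ixgbe_tso() returns a negative errno if expanding
 * a cloned header fails, true when a TSO context descriptor was queued,
 * and false for non-GSO frames.  Callers therefore check "tso < 0"
 * first and only then treat the result as a boolean (see
 * ixgbe_xmit_frame() below).
 */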
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
			                    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
		                    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case cpu_to_be16(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
					        "partial checksum but proto=%x!\n",
					        skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
                        unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = map[f] + offset;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}
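
/*
 * On skb_dma_map() failure ixgbe_tx_map() returns a count of 0; the
 * caller treats that as "nothing queued", frees the skb and rewinds
 * next_to_use to 'first' so the descriptors touched here are reused.
 * Each buffer segment is capped at IXGBE_MAX_DATA_PER_TXD, so a single
 * large fragment may consume several descriptors.
 */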
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                           struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
			                 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
		        cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
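
/*
 * Only the last descriptor of the chain gets txd_cmd OR-ed in, i.e.
 * EOP (end of packet), RS (report status) and IFCS; the descriptors
 * before it carry just the shared cmd_type_len/olinfo_status built
 * above.  The tail register write after the wmb() is what actually
 * tells the hardware to start fetching the new descriptors.
 */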
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
                      int queue, u32 tx_flags)
{
	/* Right now, we support IPv4 only */
	struct ixgbe_atr_input atr_input;
	struct tcphdr *th;
	struct iphdr *iph = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 vlan_id, src_port, dst_port, flex_bytes;
	u32 src_ipv4_addr, dst_ipv4_addr;
	u8 l4type = 0;

	/* check if we're UDP or TCP */
	if (iph->protocol == IPPROTO_TCP) {
		th = tcp_hdr(skb);
		src_port = th->source;
		dst_port = th->dest;
		l4type |= IXGBE_ATR_L4TYPE_TCP;
		/* l4type IPv4 type is 0, no need to assign */
	} else {
		/* Unsupported L4 header, just bail here */
		return;
	}

	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
	          IXGBE_TX_FLAGS_VLAN_SHIFT;
	src_ipv4_addr = iph->saddr;
	dst_ipv4_addr = iph->daddr;
	flex_bytes = eth->h_proto;

	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
	/* src and dst are inverted, think how the receiver sees them */
	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
                               struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
		return smp_processor_id();

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;

	return skb_tx_hash(dev, skb);
}
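
/*
 * When Flow Director hashing (ATR) is enabled the transmit queue is
 * simply the current CPU: ixgbe_atr() later installs a receive filter
 * steering the return traffic of the flow to the matching Rx queue, so
 * keeping Tx on the sender's CPU is what makes that affinity pay off.
 * With DCB the 802.1p priority (top 3 bits of the VLAN tag) picks the
 * queue instead.
 */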
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;
	unsigned int f;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= (skb->queue_mapping << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (skb->priority != TC_PRIO_CONTROL) {
			tx_flags |= (skb->queue_mapping << 13);
			tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
			tx_flags |= IXGBE_TX_FLAGS_VLAN;
		} else {
			skb->queue_mapping =
			        adapter->ring_feature[RING_F_DCB].indices - 1;
		}
	}

	r_idx = skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (skb->protocol == htons(ETH_P_FCOE))) {
		tx_flags |= IXGBE_TX_FLAGS_FCOE;
#ifdef IXGBE_FCOE
		r_idx = smp_processor_id();
		r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
		r_idx += adapter->ring_feature[RING_F_FCOE].mask;
		tx_ring = &adapter->tx_ring[r_idx];
#endif
	}

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (skb->protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		         (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (tx_ring->atr_sample_rate) {
			++tx_ring->atr_count;
			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
			    test_bit(__IXGBE_FDIR_INIT_DONE,
			             &tx_ring->reinit_state)) {
				ixgbe_atr(adapter, skb, tx_ring->queue_index,
				          tx_flags);
				tx_ring->atr_count = 0;
			}
		}
		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
		               hdr_len);
		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
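
/*
 * The descriptor budget is computed up front (one optional context
 * descriptor plus TXD_USE_COUNT() descriptors for the linear part and
 * for each fragment) so that ixgbe_maybe_stop_tx() can guarantee the
 * whole frame fits before any descriptor is written; the ring is
 * therefore never stopped in the middle of posting a frame.
 */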
/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}

static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
                            u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_many(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_get_stats		= ixgbe_get_stats,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_register	= ixgbe_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
#endif /* IXGBE_FCOE */
};
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
			                                  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
	                      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = &ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* a new SFP+ module arrival, called from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
	          ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			DPRINTK(PROBE, CRIT,
			        "Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Start a kernel thread to watch for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
		        "an unsupported SFP+ module type was detected.\n"
		        "Reload the driver after installing a supported "
		        "module.\n");
		goto err_sw_init;
	} else if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
		                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		/* Enable ACPI wakeup in GRC */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		                (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
	         ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s" :
	          (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s" :
	          "Unknown"),
	         ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	          (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	          (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	          "Unknown"),
	         netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, "
		         "PBA No: %06x-%03x\n",
		         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
		         (part_num >> 8), (part_num & 0xff));
	else
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		         hw->mac.type, hw->phy.type,
		         (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
		         "this card is not sufficient for optimal "
		         "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
		         "PCI-Express slot is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		dev_warn(&pdev->dev, "This device is a pre-production "
		         "adapter/LOM.  Please be aware there may be issues "
		         "associated with your hardware.  If you are "
		         "experiencing problems please contact your Intel or "
		         "hardware representative who provided you with this "
		         "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);
#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	DPRINTK(PROBE, INFO, "complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		DPRINTK(PROBE, ERR,
		        "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
		        err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);
	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
	                                 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IXGBE_DCA */

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev->name;
}
#endif

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */