ixgbe_main.c

/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.62-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* take a breather then clean up driver data */
	msleep(100);

	if (adapter->vfinfo)
		kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}
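/*
 * Worked example for the 82598 branch above (editorial illustration, not
 * from the original source): mapping Rx queue 5 to MSI-X vector 2 yields
 * index = ((0 * 64) + 5) >> 2 = 1 and byte lane 5 & 0x3 = 1, so bits 15:8
 * of IVAR(1) are rewritten with (2 | IXGBE_IVAR_ALLOC_VAL).  Each 32-bit
 * IVAR register therefore packs four 8-bit cause-to-vector entries.
 */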
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}
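/*
 * Editorial note (not from the original source): on 82599 the 64-bit queue
 * mask is split across the extended cause-set registers, low half to
 * EICS_EX(0) and high half to EICS_EX(1); on 82598 only the bits covered
 * by IXGBE_EIMS_RTX_QUEUE exist, so the mask is truncated to them.
 */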
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			pci_unmap_page(adapter->pdev,
			               tx_buffer_info->dma,
			               tx_buffer_info->length,
			               PCI_DMA_TODEVICE);
		else
			pci_unmap_single(adapter->pdev,
			                 tx_buffer_info->dma,
			                 tx_buffer_info->length,
			                 PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
/**
 * ixgbe_tx_is_paused - check if the tx ring is paused
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
 * corresponding TC of this tx_ring when checking TFCS.
 *
 * Returns : true if paused
 */
static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
                                      struct ixgbe_ring *tx_ring)
{
	u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		int tc;
		int reg_idx = tx_ring->reg_idx;
		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			tc = reg_idx >> 2;
			txoff = IXGBE_TFCS_TXOFF0;
			break;
		case ixgbe_mac_82599EB:
			tc = 0;
			txoff = IXGBE_TFCS_TXOFF;
			if (dcb_i == 8) {
				/* TC0, TC1 */
				tc = reg_idx >> 5;
				if (tc == 2) /* TC2, TC3 */
					tc += (reg_idx - 64) >> 4;
				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
					tc += 1 + ((reg_idx - 96) >> 3);
			} else if (dcb_i == 4) {
				/* TC0, TC1 */
				tc = reg_idx >> 6;
				if (tc == 1) {
					tc += (reg_idx - 64) >> 5;
					if (tc == 2) /* TC2, TC3 */
						tc += (reg_idx - 96) >> 4;
				}
			}
			break;
		default:
			tc = 0;
		}
		txoff <<= tc;
	}
#endif
	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}
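/*
 * Worked example for the 82599, 8-TC case above (editorial illustration,
 * not from the original source): a tx ring with reg_idx 70 gives
 * tc = 70 >> 5 = 2, and since tc == 2 it adds (70 - 64) >> 4 = 0, so the
 * ring belongs to TC2 and the bit tested is IXGBE_TFCS_TXOFF << 2.
 */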
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !ixgbe_tx_is_paused(adapter, tx_ring)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, tx_ring->head),
			IXGBE_READ_REG(hw, tx_ring->tail),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}
#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
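/*
 * Worked example (editorial illustration, not from the original source):
 * with IXGBE_MAX_TXD_PWR = 14 a descriptor carries at most 16384 bytes,
 * so a 20000-byte linear chunk costs TXD_USE_COUNT(20000) =
 * (20000 >> 14) + 1 = 2 descriptors.  DESC_NEEDED is the worst case for
 * one frame: one descriptor for skb->data, one per possible page-sized
 * fragment, plus one context descriptor.
 */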
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;
				unsigned int hlen = skb_headlen(skb);

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
				/* adjust for FCoE Sequence Offload */
				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
				    && (skb->protocol == htons(ETH_P_FCOE)) &&
				    skb_is_gso(skb)) {
					hlen = skb_transport_offset(skb) +
						sizeof(struct fc_frame_header) +
						sizeof(struct fcoe_crc_eof);
					segs = DIV_ROUND_UP(skb->len - hlen,
						skb_shinfo(skb)->gso_size);
				}
#endif /* IXGBE_FCOE */
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * hlen) + skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
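	/*
	 * Editorial note (not from the original source): the queue is only
	 * restarted below once at least two worst-case frames' worth of
	 * descriptors are free, which keeps it from bouncing between
	 * stopped and woken on every reclaimed descriptor.
	 */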
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++tx_ring->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	return (count < tx_ring->work_limit);
}
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring->reg_idx;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring->reg_idx;
	struct ixgbe_hw *hw = &adapter->hw;

	if (tx_ring->cpu != cpu) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
		}
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i]->cpu = -1;
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->cpu = -1;
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) the packet came from
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	skb_record_rx_queue(skb, ring->queue_index);
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware status/error bits
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}
			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb;
			/* netdev_alloc_skb reserves 32 bytes up front!! */
			uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
			skb = netdev_alloc_skb(adapter->netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/* advance the data pointer to the next cache line */
			skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
			                  - skb->data));

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data,
			                         rx_ring->rx_buf_len,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);
		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
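/*
 * Editorial note (not from the original source): the tail value handed to
 * ixgbe_release_rx_desc() above is the index of the last descriptor the
 * driver actually populated, i.e. one entry before the new next_to_use,
 * wrapping from 0 back to count - 1.
 */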
static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
	        IXGBE_RXDADV_RSCCNT_MASK) >>
	        IXGBE_RXDADV_RSCCNT_SHIFT;
}
/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 * @count: pointer to number of packets coalesced in this context
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
                                                        u64 *count)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		*count += 1;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}
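/*
 * Illustration (editorial, not from the original source): given an RSC
 * chain of three buffers A <-> B <-> C built by ixgbe_clean_rx_irq()
 * (A is the first buffer, C carries EOP and is the skb passed in), the
 * loop above walks C -> B -> A via ->prev, adding len(C) + len(B) to
 * frag_list_size.  A's ->next already points at B, so it becomes A's
 * frag_list and A is returned as one logical packet.
 */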
struct ixgbe_rsc_cb {
	dma_addr_t dma;
};

#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
			    (!(skb->prev)))
				/*
				 * When HWRSC is enabled, delay unmapping
				 * of the first packet. It carries the
				 * header information, HW may still
				 * access the header after the writeback.
				 * Only unmap it when EOP is reached
				 */
				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
			else
				pci_unmap_single(pdev, rx_buffer_info->dma,
				                 rx_ring->rx_buf_len,
				                 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
			             IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}
  822. if (staterr & IXGBE_RXD_STAT_EOP) {
  823. if (skb->prev)
  824. skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
  825. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
  826. if (IXGBE_RSC_CB(skb)->dma)
  827. pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
  828. rx_ring->rx_buf_len,
  829. PCI_DMA_FROMDEVICE);
  830. if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
  831. rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
  832. else
  833. rx_ring->rsc_count++;
  834. rx_ring->rsc_flush++;
  835. }
  836. rx_ring->stats.packets++;
  837. rx_ring->stats.bytes += skb->len;
  838. } else {
  839. if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
  840. rx_buffer_info->skb = next_buffer->skb;
  841. rx_buffer_info->dma = next_buffer->dma;
  842. next_buffer->skb = skb;
  843. next_buffer->dma = 0;
  844. } else {
  845. skb->next = next_buffer->skb;
  846. skb->next->prev = skb;
  847. }
  848. rx_ring->non_eop_descs++;
  849. goto next_desc;
  850. }
  851. if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
  852. dev_kfree_skb_irq(skb);
  853. goto next_desc;
  854. }
  855. ixgbe_rx_checksum(adapter, rx_desc, skb);
  856. /* probably a little skewed due to removing CRC */
  857. total_rx_bytes += skb->len;
  858. total_rx_packets++;
  859. skb->protocol = eth_type_trans(skb, adapter->netdev);
  860. #ifdef IXGBE_FCOE
861. /* if DDPed, do not pass to the ULD unless it is an FCP_RSP or an error */
  862. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  863. ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
  864. if (!ddp_bytes)
  865. goto next_desc;
  866. }
  867. #endif /* IXGBE_FCOE */
  868. ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
  869. next_desc:
  870. rx_desc->wb.upper.status_error = 0;
  871. /* return some buffers to hardware, one at a time is too slow */
  872. if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
  873. ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
  874. cleaned_count = 0;
  875. }
  876. /* use prefetched values */
  877. rx_desc = next_rxd;
  878. rx_buffer_info = &rx_ring->rx_buffer_info[i];
  879. staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
  880. }
  881. rx_ring->next_to_clean = i;
  882. cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
  883. if (cleaned_count)
  884. ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
  885. #ifdef IXGBE_FCOE
  886. /* include DDPed FCoE data */
  887. if (ddp_bytes > 0) {
  888. unsigned int mss;
  889. mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
  890. sizeof(struct fc_frame_header) -
  891. sizeof(struct fcoe_crc_eof);
  892. if (mss > 512)
  893. mss &= ~511;
  894. total_rx_bytes += ddp_bytes;
  895. total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
  896. }
  897. #endif /* IXGBE_FCOE */
  898. rx_ring->total_packets += total_rx_packets;
  899. rx_ring->total_bytes += total_rx_bytes;
  900. netdev->stats.rx_bytes += total_rx_bytes;
  901. netdev->stats.rx_packets += total_rx_packets;
  902. return cleaned;
  903. }
  904. static int ixgbe_clean_rxonly(struct napi_struct *, int);
  905. /**
  906. * ixgbe_configure_msix - Configure MSI-X hardware
  907. * @adapter: board private structure
  908. *
  909. * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
  910. * interrupts.
  911. **/
  912. static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
  913. {
  914. struct ixgbe_q_vector *q_vector;
  915. int i, j, q_vectors, v_idx, r_idx;
  916. u32 mask;
  917. q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  918. /*
  919. * Populate the IVAR table and set the ITR values to the
  920. * corresponding register.
  921. */
  922. for (v_idx = 0; v_idx < q_vectors; v_idx++) {
  923. q_vector = adapter->q_vector[v_idx];
  924. /* XXX for_each_set_bit(...) */
  925. r_idx = find_first_bit(q_vector->rxr_idx,
  926. adapter->num_rx_queues);
  927. for (i = 0; i < q_vector->rxr_count; i++) {
  928. j = adapter->rx_ring[r_idx]->reg_idx;
  929. ixgbe_set_ivar(adapter, 0, j, v_idx);
  930. r_idx = find_next_bit(q_vector->rxr_idx,
  931. adapter->num_rx_queues,
  932. r_idx + 1);
  933. }
  934. r_idx = find_first_bit(q_vector->txr_idx,
  935. adapter->num_tx_queues);
  936. for (i = 0; i < q_vector->txr_count; i++) {
  937. j = adapter->tx_ring[r_idx]->reg_idx;
  938. ixgbe_set_ivar(adapter, 1, j, v_idx);
  939. r_idx = find_next_bit(q_vector->txr_idx,
  940. adapter->num_tx_queues,
  941. r_idx + 1);
  942. }
  943. if (q_vector->txr_count && !q_vector->rxr_count)
  944. /* tx only */
  945. q_vector->eitr = adapter->tx_eitr_param;
  946. else if (q_vector->rxr_count)
  947. /* rx or mixed */
  948. q_vector->eitr = adapter->rx_eitr_param;
  949. ixgbe_write_eitr(q_vector);
  950. }
  951. if (adapter->hw.mac.type == ixgbe_mac_82598EB)
  952. ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
  953. v_idx);
  954. else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
  955. ixgbe_set_ivar(adapter, -1, 1, v_idx);
  956. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
  957. /* set up to autoclear timer, and the vectors */
  958. mask = IXGBE_EIMS_ENABLE_MASK;
  959. if (adapter->num_vfs)
  960. mask &= ~(IXGBE_EIMS_OTHER |
  961. IXGBE_EIMS_MAILBOX |
  962. IXGBE_EIMS_LSC);
  963. else
  964. mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
  965. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
  966. }
  967. enum latency_range {
  968. lowest_latency = 0,
  969. low_latency = 1,
  970. bulk_latency = 2,
  971. latency_invalid = 255
  972. };
  973. /**
  974. * ixgbe_update_itr - update the dynamic ITR value based on statistics
  975. * @adapter: pointer to adapter
976. * @eitr: eitr setting (ints per sec) used for the last timeslice
977. * @itr_setting: current latency range (lowest/low/bulk) for this ring
  978. * @packets: the number of packets during this measurement interval
  979. * @bytes: the number of bytes during this measurement interval
  980. *
  981. * Stores a new ITR value based on packets and byte
  982. * counts during the last interrupt. The advantage of per interrupt
  983. * computation is faster updates and more accurate ITR for the current
  984. * traffic pattern. Constants in this function were computed
  985. * based on theoretical maximum wire speed and thresholds were set based
  986. * on testing data as well as attempting to minimize response time
  987. * while increasing bulk throughput.
988. * This functionality is controlled by the InterruptThrottleRate module
989. * parameter (see ixgbe_param.c)
  990. **/
  991. static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
  992. u32 eitr, u8 itr_setting,
  993. int packets, int bytes)
  994. {
  995. unsigned int retval = itr_setting;
  996. u32 timepassed_us;
  997. u64 bytes_perint;
  998. if (packets == 0)
  999. goto update_itr_done;
1000. /* simple throttle rate management
  1001. * 0-20MB/s lowest (100000 ints/s)
  1002. * 20-100MB/s low (20000 ints/s)
  1003. * 100-1249MB/s bulk (8000 ints/s)
  1004. */
  1005. /* what was last interrupt timeslice? */
  1006. timepassed_us = 1000000/eitr;
  1007. bytes_perint = bytes / timepassed_us; /* bytes/usec */
  1008. switch (itr_setting) {
  1009. case lowest_latency:
  1010. if (bytes_perint > adapter->eitr_low)
  1011. retval = low_latency;
  1012. break;
  1013. case low_latency:
  1014. if (bytes_perint > adapter->eitr_high)
  1015. retval = bulk_latency;
  1016. else if (bytes_perint <= adapter->eitr_low)
  1017. retval = lowest_latency;
  1018. break;
  1019. case bulk_latency:
  1020. if (bytes_perint <= adapter->eitr_high)
  1021. retval = low_latency;
  1022. break;
  1023. }
  1024. update_itr_done:
  1025. return retval;
  1026. }
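/*
 * Illustrative walk-through of ixgbe_update_itr() above (not part of the
 * driver; the eitr_low/eitr_high thresholds used here are assumed values):
 * with eitr = 20000 ints/s the last timeslice was 1000000/20000 = 50 us.
 * If 3000 bytes arrived in that interval, bytes_perint = 3000/50 = 60
 * bytes/us; with a hypothetical eitr_high of 20 this exceeds the upper
 * threshold, so a ring currently in low_latency steps to bulk_latency.
 */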
  1027. /**
  1028. * ixgbe_write_eitr - write EITR register in hardware specific way
  1029. * @q_vector: structure containing interrupt and ring information
  1030. *
  1031. * This function is made to be called by ethtool and by the driver
  1032. * when it needs to update EITR registers at runtime. Hardware
  1033. * specific quirks/differences are taken care of here.
  1034. */
  1035. void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
  1036. {
  1037. struct ixgbe_adapter *adapter = q_vector->adapter;
  1038. struct ixgbe_hw *hw = &adapter->hw;
  1039. int v_idx = q_vector->v_idx;
  1040. u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
  1041. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  1042. /* must write high and low 16 bits to reset counter */
  1043. itr_reg |= (itr_reg << 16);
  1044. } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
  1045. /*
  1046. * set the WDIS bit to not clear the timer bits and cause an
  1047. * immediate assertion of the interrupt
  1048. */
  1049. itr_reg |= IXGBE_EITR_CNT_WDIS;
  1050. }
  1051. IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
  1052. }
  1053. static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
  1054. {
  1055. struct ixgbe_adapter *adapter = q_vector->adapter;
  1056. u32 new_itr;
  1057. u8 current_itr, ret_itr;
  1058. int i, r_idx;
  1059. struct ixgbe_ring *rx_ring, *tx_ring;
  1060. r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
  1061. for (i = 0; i < q_vector->txr_count; i++) {
  1062. tx_ring = adapter->tx_ring[r_idx];
  1063. ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
  1064. q_vector->tx_itr,
  1065. tx_ring->total_packets,
  1066. tx_ring->total_bytes);
  1067. /* if the result for this queue would decrease interrupt
  1068. * rate for this vector then use that result */
  1069. q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
  1070. q_vector->tx_itr - 1 : ret_itr);
  1071. r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
  1072. r_idx + 1);
  1073. }
  1074. r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
  1075. for (i = 0; i < q_vector->rxr_count; i++) {
  1076. rx_ring = adapter->rx_ring[r_idx];
  1077. ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
  1078. q_vector->rx_itr,
  1079. rx_ring->total_packets,
  1080. rx_ring->total_bytes);
  1081. /* if the result for this queue would decrease interrupt
  1082. * rate for this vector then use that result */
  1083. q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
  1084. q_vector->rx_itr - 1 : ret_itr);
  1085. r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
  1086. r_idx + 1);
  1087. }
  1088. current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
  1089. switch (current_itr) {
  1090. /* counts and packets in update_itr are dependent on these numbers */
  1091. case lowest_latency:
  1092. new_itr = 100000;
  1093. break;
  1094. case low_latency:
  1095. new_itr = 20000; /* aka hwitr = ~200 */
  1096. break;
  1097. case bulk_latency:
  1098. default:
  1099. new_itr = 8000;
  1100. break;
  1101. }
  1102. if (new_itr != q_vector->eitr) {
  1103. /* do an exponential smoothing */
  1104. new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1105. /* save the smoothed value; the next adjustment ramps from here */
  1106. q_vector->eitr = new_itr;
  1107. ixgbe_write_eitr(q_vector);
  1108. }
  1109. return;
  1110. }
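/*
 * Worked example of the exponential smoothing above (illustrative only):
 * if q_vector->eitr is currently 8000 and the latency table selects a new
 * target of 100000 ints/s, the smoothed result is (8000 * 90)/100 +
 * (100000 * 10)/100 = 7200 + 10000 = 17200, so the interrupt rate ramps
 * up over several intervals instead of jumping straight to 100000.
 */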
  1111. static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
  1112. {
  1113. struct ixgbe_hw *hw = &adapter->hw;
  1114. if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
  1115. (eicr & IXGBE_EICR_GPI_SDP1)) {
  1116. DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
  1117. /* write to clear the interrupt */
  1118. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
  1119. }
  1120. }
  1121. static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
  1122. {
  1123. struct ixgbe_hw *hw = &adapter->hw;
  1124. if (eicr & IXGBE_EICR_GPI_SDP1) {
  1125. /* Clear the interrupt */
  1126. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
  1127. schedule_work(&adapter->multispeed_fiber_task);
  1128. } else if (eicr & IXGBE_EICR_GPI_SDP2) {
  1129. /* Clear the interrupt */
  1130. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
  1131. schedule_work(&adapter->sfp_config_module_task);
  1132. } else {
  1133. /* Interrupt isn't for us... */
  1134. return;
  1135. }
  1136. }
  1137. static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
  1138. {
  1139. struct ixgbe_hw *hw = &adapter->hw;
  1140. adapter->lsc_int++;
  1141. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  1142. adapter->link_check_timeout = jiffies;
  1143. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  1144. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
  1145. IXGBE_WRITE_FLUSH(hw);
  1146. schedule_work(&adapter->watchdog_task);
  1147. }
  1148. }
  1149. static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
  1150. {
  1151. struct net_device *netdev = data;
  1152. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  1153. struct ixgbe_hw *hw = &adapter->hw;
  1154. u32 eicr;
  1155. /*
1156. * Workaround for silicon errata: use clear-by-write instead
1157. * of clear-by-read. Reading with EICS returns the
1158. * interrupt causes without clearing them; they are cleared
1159. * later by the write to EICR.
  1160. */
  1161. eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
  1162. IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
  1163. if (eicr & IXGBE_EICR_LSC)
  1164. ixgbe_check_lsc(adapter);
  1165. if (eicr & IXGBE_EICR_MAILBOX)
  1166. ixgbe_msg_task(adapter);
  1167. if (hw->mac.type == ixgbe_mac_82598EB)
  1168. ixgbe_check_fan_failure(adapter, eicr);
  1169. if (hw->mac.type == ixgbe_mac_82599EB) {
  1170. ixgbe_check_sfp_event(adapter, eicr);
  1171. /* Handle Flow Director Full threshold interrupt */
  1172. if (eicr & IXGBE_EICR_FLOW_DIR) {
  1173. int i;
  1174. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
  1175. /* Disable transmits before FDIR Re-initialization */
  1176. netif_tx_stop_all_queues(netdev);
  1177. for (i = 0; i < adapter->num_tx_queues; i++) {
  1178. struct ixgbe_ring *tx_ring =
  1179. adapter->tx_ring[i];
  1180. if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
  1181. &tx_ring->reinit_state))
  1182. schedule_work(&adapter->fdir_reinit_task);
  1183. }
  1184. }
  1185. }
  1186. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  1187. IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
  1188. return IRQ_HANDLED;
  1189. }
  1190. static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
  1191. u64 qmask)
  1192. {
  1193. u32 mask;
  1194. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  1195. mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
  1196. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
  1197. } else {
  1198. mask = (qmask & 0xFFFFFFFF);
  1199. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
  1200. mask = (qmask >> 32);
  1201. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
  1202. }
  1203. /* skip the flush */
  1204. }
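/*
 * Example of the qmask split above (illustrative): a request for queue
 * vector 40 on 82599 arrives as qmask = 1ULL << 40, which leaves the low
 * 32-bit word empty and sets bit 8 (40 - 32) in the high word, so the
 * enable is delivered through EIMS_EX(1).
 */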
  1205. static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
  1206. u64 qmask)
  1207. {
  1208. u32 mask;
  1209. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  1210. mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
  1211. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
  1212. } else {
  1213. mask = (qmask & 0xFFFFFFFF);
  1214. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
  1215. mask = (qmask >> 32);
  1216. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
  1217. }
  1218. /* skip the flush */
  1219. }
  1220. static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
  1221. {
  1222. struct ixgbe_q_vector *q_vector = data;
  1223. struct ixgbe_adapter *adapter = q_vector->adapter;
  1224. struct ixgbe_ring *tx_ring;
  1225. int i, r_idx;
  1226. if (!q_vector->txr_count)
  1227. return IRQ_HANDLED;
  1228. r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
  1229. for (i = 0; i < q_vector->txr_count; i++) {
  1230. tx_ring = adapter->tx_ring[r_idx];
  1231. tx_ring->total_bytes = 0;
  1232. tx_ring->total_packets = 0;
  1233. r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
  1234. r_idx + 1);
  1235. }
  1236. /* EIAM disabled interrupts (on this vector) for us */
  1237. napi_schedule(&q_vector->napi);
  1238. return IRQ_HANDLED;
  1239. }
  1240. /**
  1241. * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
  1242. * @irq: unused
  1243. * @data: pointer to our q_vector struct for this interrupt vector
  1244. **/
  1245. static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
  1246. {
  1247. struct ixgbe_q_vector *q_vector = data;
  1248. struct ixgbe_adapter *adapter = q_vector->adapter;
  1249. struct ixgbe_ring *rx_ring;
  1250. int r_idx;
  1251. int i;
  1252. r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
  1253. for (i = 0; i < q_vector->rxr_count; i++) {
  1254. rx_ring = adapter->rx_ring[r_idx];
  1255. rx_ring->total_bytes = 0;
  1256. rx_ring->total_packets = 0;
  1257. r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
  1258. r_idx + 1);
  1259. }
  1260. if (!q_vector->rxr_count)
  1261. return IRQ_HANDLED;
1262. /* no explicit disable is needed on this vector: */
1263. /* EIAM already disabled interrupts (on this vector) for us */
  1264. napi_schedule(&q_vector->napi);
  1265. return IRQ_HANDLED;
  1266. }
  1267. static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
  1268. {
  1269. struct ixgbe_q_vector *q_vector = data;
  1270. struct ixgbe_adapter *adapter = q_vector->adapter;
  1271. struct ixgbe_ring *ring;
  1272. int r_idx;
  1273. int i;
  1274. if (!q_vector->txr_count && !q_vector->rxr_count)
  1275. return IRQ_HANDLED;
  1276. r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
  1277. for (i = 0; i < q_vector->txr_count; i++) {
  1278. ring = adapter->tx_ring[r_idx];
  1279. ring->total_bytes = 0;
  1280. ring->total_packets = 0;
  1281. r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
  1282. r_idx + 1);
  1283. }
  1284. r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
  1285. for (i = 0; i < q_vector->rxr_count; i++) {
  1286. ring = adapter->rx_ring[r_idx];
  1287. ring->total_bytes = 0;
  1288. ring->total_packets = 0;
  1289. r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
  1290. r_idx + 1);
  1291. }
  1292. /* EIAM disabled interrupts (on this vector) for us */
  1293. napi_schedule(&q_vector->napi);
  1294. return IRQ_HANDLED;
  1295. }
  1296. /**
  1297. * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1298. * @napi: napi struct with our device's info in it
  1299. * @budget: amount of work driver is allowed to do this pass, in packets
  1300. *
  1301. * This function is optimized for cleaning one queue only on a single
  1302. * q_vector!!!
  1303. **/
  1304. static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
  1305. {
  1306. struct ixgbe_q_vector *q_vector =
  1307. container_of(napi, struct ixgbe_q_vector, napi);
  1308. struct ixgbe_adapter *adapter = q_vector->adapter;
  1309. struct ixgbe_ring *rx_ring = NULL;
  1310. int work_done = 0;
  1311. long r_idx;
  1312. r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
  1313. rx_ring = adapter->rx_ring[r_idx];
  1314. #ifdef CONFIG_IXGBE_DCA
  1315. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1316. ixgbe_update_rx_dca(adapter, rx_ring);
  1317. #endif
  1318. ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
  1319. /* If all Rx work done, exit the polling mode */
  1320. if (work_done < budget) {
  1321. napi_complete(napi);
  1322. if (adapter->rx_itr_setting & 1)
  1323. ixgbe_set_itr_msix(q_vector);
  1324. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  1325. ixgbe_irq_enable_queues(adapter,
  1326. ((u64)1 << q_vector->v_idx));
  1327. }
  1328. return work_done;
  1329. }
  1330. /**
1331. * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
1332. * @napi: napi struct with our device's info in it
  1333. * @budget: amount of work driver is allowed to do this pass, in packets
  1334. *
1335. * This function cleans all the Tx and Rx queues associated with a
1336. * q_vector.
  1337. **/
  1338. static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
  1339. {
  1340. struct ixgbe_q_vector *q_vector =
  1341. container_of(napi, struct ixgbe_q_vector, napi);
  1342. struct ixgbe_adapter *adapter = q_vector->adapter;
  1343. struct ixgbe_ring *ring = NULL;
  1344. int work_done = 0, i;
  1345. long r_idx;
  1346. bool tx_clean_complete = true;
  1347. r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
  1348. for (i = 0; i < q_vector->txr_count; i++) {
  1349. ring = adapter->tx_ring[r_idx];
  1350. #ifdef CONFIG_IXGBE_DCA
  1351. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1352. ixgbe_update_tx_dca(adapter, ring);
  1353. #endif
  1354. tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
  1355. r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
  1356. r_idx + 1);
  1357. }
  1358. /* attempt to distribute budget to each queue fairly, but don't allow
  1359. * the budget to go below 1 because we'll exit polling */
  1360. budget /= (q_vector->rxr_count ?: 1);
  1361. budget = max(budget, 1);
  1362. r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
  1363. for (i = 0; i < q_vector->rxr_count; i++) {
  1364. ring = adapter->rx_ring[r_idx];
  1365. #ifdef CONFIG_IXGBE_DCA
  1366. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1367. ixgbe_update_rx_dca(adapter, ring);
  1368. #endif
  1369. ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
  1370. r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
  1371. r_idx + 1);
  1372. }
  1373. r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
  1374. ring = adapter->rx_ring[r_idx];
  1375. /* If all Rx work done, exit the polling mode */
  1376. if (work_done < budget) {
  1377. napi_complete(napi);
  1378. if (adapter->rx_itr_setting & 1)
  1379. ixgbe_set_itr_msix(q_vector);
  1380. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  1381. ixgbe_irq_enable_queues(adapter,
  1382. ((u64)1 << q_vector->v_idx));
  1383. return 0;
  1384. }
  1385. return work_done;
  1386. }
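/*
 * Budget split example for the routine above (illustrative): with a NAPI
 * budget of 64 and rxr_count = 4, each Rx ring is polled with a budget of
 * 64 / 4 = 16; with rxr_count = 0 the "?: 1" fallback keeps the divisor
 * at 1, and the max() keeps the per-ring budget from dropping below one
 * packet.
 */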
  1387. /**
  1388. * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
1389. * @napi: napi struct with our device's info in it
  1390. * @budget: amount of work driver is allowed to do this pass, in packets
  1391. *
  1392. * This function is optimized for cleaning one queue only on a single
  1393. * q_vector!!!
  1394. **/
  1395. static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
  1396. {
  1397. struct ixgbe_q_vector *q_vector =
  1398. container_of(napi, struct ixgbe_q_vector, napi);
  1399. struct ixgbe_adapter *adapter = q_vector->adapter;
  1400. struct ixgbe_ring *tx_ring = NULL;
  1401. int work_done = 0;
  1402. long r_idx;
  1403. r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
  1404. tx_ring = adapter->tx_ring[r_idx];
  1405. #ifdef CONFIG_IXGBE_DCA
  1406. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1407. ixgbe_update_tx_dca(adapter, tx_ring);
  1408. #endif
  1409. if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
  1410. work_done = budget;
  1411. /* If all Tx work done, exit the polling mode */
  1412. if (work_done < budget) {
  1413. napi_complete(napi);
  1414. if (adapter->tx_itr_setting & 1)
  1415. ixgbe_set_itr_msix(q_vector);
  1416. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  1417. ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
  1418. }
  1419. return work_done;
  1420. }
  1421. static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
  1422. int r_idx)
  1423. {
  1424. struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
  1425. set_bit(r_idx, q_vector->rxr_idx);
  1426. q_vector->rxr_count++;
  1427. }
  1428. static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  1429. int t_idx)
  1430. {
  1431. struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
  1432. set_bit(t_idx, q_vector->txr_idx);
  1433. q_vector->txr_count++;
  1434. }
  1435. /**
  1436. * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
  1437. * @adapter: board private structure to initialize
  1438. * @vectors: allotted vector count for descriptor rings
  1439. *
  1440. * This function maps descriptor rings to the queue-specific vectors
  1441. * we were allotted through the MSI-X enabling code. Ideally, we'd have
  1442. * one vector per ring/queue, but on a constrained vector budget, we
  1443. * group the rings as "efficiently" as possible. You would add new
  1444. * mapping configurations in here.
  1445. **/
  1446. static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
  1447. int vectors)
  1448. {
  1449. int v_start = 0;
  1450. int rxr_idx = 0, txr_idx = 0;
  1451. int rxr_remaining = adapter->num_rx_queues;
  1452. int txr_remaining = adapter->num_tx_queues;
  1453. int i, j;
  1454. int rqpv, tqpv;
  1455. int err = 0;
  1456. /* No mapping required if MSI-X is disabled. */
  1457. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  1458. goto out;
  1459. /*
  1460. * The ideal configuration...
  1461. * We have enough vectors to map one per queue.
  1462. */
  1463. if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
  1464. for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
  1465. map_vector_to_rxq(adapter, v_start, rxr_idx);
  1466. for (; txr_idx < txr_remaining; v_start++, txr_idx++)
  1467. map_vector_to_txq(adapter, v_start, txr_idx);
  1468. goto out;
  1469. }
  1470. /*
  1471. * If we don't have enough vectors for a 1-to-1
  1472. * mapping, we'll have to group them so there are
  1473. * multiple queues per vector.
  1474. */
  1475. /* Re-adjusting *qpv takes care of the remainder. */
  1476. for (i = v_start; i < vectors; i++) {
  1477. rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
  1478. for (j = 0; j < rqpv; j++) {
  1479. map_vector_to_rxq(adapter, i, rxr_idx);
  1480. rxr_idx++;
  1481. rxr_remaining--;
  1482. }
  1483. }
  1484. for (i = v_start; i < vectors; i++) {
  1485. tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
  1486. for (j = 0; j < tqpv; j++) {
  1487. map_vector_to_txq(adapter, i, txr_idx);
  1488. txr_idx++;
  1489. txr_remaining--;
  1490. }
  1491. }
  1492. out:
  1493. return err;
  1494. }
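/*
 * Mapping example for the fallback path above (illustrative): with 10 Rx
 * queues and only 4 vectors, DIV_ROUND_UP spreads them as 3, 3, 2 and 2
 * rings per vector (i = 0: ceil(10/4) = 3 leaves 7; i = 1: ceil(7/3) = 3
 * leaves 4; i = 2: ceil(4/2) = 2; i = 3: ceil(2/1) = 2).
 */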
  1495. /**
  1496. * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
  1497. * @adapter: board private structure
  1498. *
  1499. * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
  1500. * interrupts from the kernel.
  1501. **/
  1502. static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
  1503. {
  1504. struct net_device *netdev = adapter->netdev;
  1505. irqreturn_t (*handler)(int, void *);
  1506. int i, vector, q_vectors, err;
1507. int ri = 0, ti = 0;
  1508. /* Decrement for Other and TCP Timer vectors */
  1509. q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  1510. /* Map the Tx/Rx rings to the vectors we were allotted. */
  1511. err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
  1512. if (err)
  1513. goto out;
  1514. #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
  1515. (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
  1516. &ixgbe_msix_clean_many)
  1517. for (vector = 0; vector < q_vectors; vector++) {
  1518. handler = SET_HANDLER(adapter->q_vector[vector]);
1519. if (handler == &ixgbe_msix_clean_rx) {
  1520. sprintf(adapter->name[vector], "%s-%s-%d",
  1521. netdev->name, "rx", ri++);
  1522. }
1523. else if (handler == &ixgbe_msix_clean_tx) {
  1524. sprintf(adapter->name[vector], "%s-%s-%d",
  1525. netdev->name, "tx", ti++);
  1526. }
  1527. else
  1528. sprintf(adapter->name[vector], "%s-%s-%d",
  1529. netdev->name, "TxRx", vector);
  1530. err = request_irq(adapter->msix_entries[vector].vector,
  1531. handler, 0, adapter->name[vector],
  1532. adapter->q_vector[vector]);
  1533. if (err) {
  1534. DPRINTK(PROBE, ERR,
  1535. "request_irq failed for MSIX interrupt "
  1536. "Error: %d\n", err);
  1537. goto free_queue_irqs;
  1538. }
  1539. }
  1540. sprintf(adapter->name[vector], "%s:lsc", netdev->name);
  1541. err = request_irq(adapter->msix_entries[vector].vector,
  1542. ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
  1543. if (err) {
  1544. DPRINTK(PROBE, ERR,
  1545. "request_irq for msix_lsc failed: %d\n", err);
  1546. goto free_queue_irqs;
  1547. }
  1548. return 0;
  1549. free_queue_irqs:
  1550. for (i = vector - 1; i >= 0; i--)
  1551. free_irq(adapter->msix_entries[--vector].vector,
  1552. adapter->q_vector[i]);
  1553. adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
  1554. pci_disable_msix(adapter->pdev);
  1555. kfree(adapter->msix_entries);
  1556. adapter->msix_entries = NULL;
  1557. out:
  1558. return err;
  1559. }
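/*
 * Resulting IRQ names from the sprintf calls above (illustrative, assuming
 * the interface is named eth0): an Rx-only vector is registered as
 * "eth0-rx-0", a Tx-only vector as "eth0-tx-0", a mixed vector as
 * "eth0-TxRx-2" (indexed by vector number), and the final non-queue
 * vector as "eth0:lsc".
 */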
  1560. static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
  1561. {
  1562. struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
  1563. u8 current_itr;
  1564. u32 new_itr = q_vector->eitr;
  1565. struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
  1566. struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
  1567. q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
  1568. q_vector->tx_itr,
  1569. tx_ring->total_packets,
  1570. tx_ring->total_bytes);
  1571. q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
  1572. q_vector->rx_itr,
  1573. rx_ring->total_packets,
  1574. rx_ring->total_bytes);
  1575. current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
  1576. switch (current_itr) {
  1577. /* counts and packets in update_itr are dependent on these numbers */
  1578. case lowest_latency:
  1579. new_itr = 100000;
  1580. break;
  1581. case low_latency:
  1582. new_itr = 20000; /* aka hwitr = ~200 */
  1583. break;
  1584. case bulk_latency:
  1585. new_itr = 8000;
  1586. break;
  1587. default:
  1588. break;
  1589. }
  1590. if (new_itr != q_vector->eitr) {
  1591. /* do an exponential smoothing */
  1592. new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1593. /* save the smoothed value; the next adjustment ramps from here */
  1594. q_vector->eitr = new_itr;
  1595. ixgbe_write_eitr(q_vector);
  1596. }
  1597. return;
  1598. }
  1599. /**
  1600. * ixgbe_irq_enable - Enable default interrupt generation settings
  1601. * @adapter: board private structure
  1602. **/
  1603. static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
  1604. {
  1605. u32 mask;
  1606. mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
  1607. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
  1608. mask |= IXGBE_EIMS_GPI_SDP1;
  1609. if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
  1610. mask |= IXGBE_EIMS_ECC;
  1611. mask |= IXGBE_EIMS_GPI_SDP1;
  1612. mask |= IXGBE_EIMS_GPI_SDP2;
  1613. if (adapter->num_vfs)
  1614. mask |= IXGBE_EIMS_MAILBOX;
  1615. }
  1616. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
  1617. adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
  1618. mask |= IXGBE_EIMS_FLOW_DIR;
  1619. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
  1620. ixgbe_irq_enable_queues(adapter, ~0);
  1621. IXGBE_WRITE_FLUSH(&adapter->hw);
  1622. if (adapter->num_vfs > 32) {
  1623. u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
  1624. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
  1625. }
  1626. }
  1627. /**
  1628. * ixgbe_intr - legacy mode Interrupt Handler
  1629. * @irq: interrupt number
  1630. * @data: pointer to a network interface device structure
  1631. **/
  1632. static irqreturn_t ixgbe_intr(int irq, void *data)
  1633. {
  1634. struct net_device *netdev = data;
  1635. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  1636. struct ixgbe_hw *hw = &adapter->hw;
  1637. struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
  1638. u32 eicr;
  1639. /*
  1640. * Workaround for silicon errata. Mask the interrupts
  1641. * before the read of EICR.
  1642. */
  1643. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
  1644. /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
1645. * therefore no explicit interrupt disable is necessary */
  1646. eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
  1647. if (!eicr) {
  1648. /* shared interrupt alert!
  1649. * make sure interrupts are enabled because the read will
  1650. * have disabled interrupts due to EIAM */
  1651. ixgbe_irq_enable(adapter);
  1652. return IRQ_NONE; /* Not our interrupt */
  1653. }
  1654. if (eicr & IXGBE_EICR_LSC)
  1655. ixgbe_check_lsc(adapter);
  1656. if (hw->mac.type == ixgbe_mac_82599EB)
  1657. ixgbe_check_sfp_event(adapter, eicr);
  1658. ixgbe_check_fan_failure(adapter, eicr);
  1659. if (napi_schedule_prep(&(q_vector->napi))) {
  1660. adapter->tx_ring[0]->total_packets = 0;
  1661. adapter->tx_ring[0]->total_bytes = 0;
  1662. adapter->rx_ring[0]->total_packets = 0;
  1663. adapter->rx_ring[0]->total_bytes = 0;
  1664. /* would disable interrupts here but EIAM disabled it */
  1665. __napi_schedule(&(q_vector->napi));
  1666. }
  1667. return IRQ_HANDLED;
  1668. }
  1669. static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
  1670. {
  1671. int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  1672. for (i = 0; i < q_vectors; i++) {
  1673. struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
  1674. bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
  1675. bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
  1676. q_vector->rxr_count = 0;
  1677. q_vector->txr_count = 0;
  1678. }
  1679. }
  1680. /**
  1681. * ixgbe_request_irq - initialize interrupts
  1682. * @adapter: board private structure
  1683. *
  1684. * Attempts to configure interrupts using the best available
  1685. * capabilities of the hardware and kernel.
  1686. **/
  1687. static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
  1688. {
  1689. struct net_device *netdev = adapter->netdev;
  1690. int err;
  1691. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  1692. err = ixgbe_request_msix_irqs(adapter);
  1693. } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
  1694. err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
  1695. netdev->name, netdev);
  1696. } else {
  1697. err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
  1698. netdev->name, netdev);
  1699. }
  1700. if (err)
  1701. DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
  1702. return err;
  1703. }
  1704. static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  1705. {
  1706. struct net_device *netdev = adapter->netdev;
  1707. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  1708. int i, q_vectors;
  1709. q_vectors = adapter->num_msix_vectors;
  1710. i = q_vectors - 1;
  1711. free_irq(adapter->msix_entries[i].vector, netdev);
  1712. i--;
  1713. for (; i >= 0; i--) {
  1714. free_irq(adapter->msix_entries[i].vector,
  1715. adapter->q_vector[i]);
  1716. }
  1717. ixgbe_reset_q_vectors(adapter);
  1718. } else {
  1719. free_irq(adapter->pdev->irq, netdev);
  1720. }
  1721. }
  1722. /**
  1723. * ixgbe_irq_disable - Mask off interrupt generation on the NIC
  1724. * @adapter: board private structure
  1725. **/
  1726. static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  1727. {
  1728. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  1729. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
  1730. } else {
  1731. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
  1732. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
  1733. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
  1734. if (adapter->num_vfs > 32)
  1735. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
  1736. }
  1737. IXGBE_WRITE_FLUSH(&adapter->hw);
  1738. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  1739. int i;
  1740. for (i = 0; i < adapter->num_msix_vectors; i++)
  1741. synchronize_irq(adapter->msix_entries[i].vector);
  1742. } else {
  1743. synchronize_irq(adapter->pdev->irq);
  1744. }
  1745. }
  1746. /**
  1747. * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1748. * @adapter: board private structure
  1749. **/
  1750. static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
  1751. {
  1752. struct ixgbe_hw *hw = &adapter->hw;
  1753. IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
  1754. EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
  1755. ixgbe_set_ivar(adapter, 0, 0, 0);
  1756. ixgbe_set_ivar(adapter, 1, 0, 0);
  1757. map_vector_to_rxq(adapter, 0, 0);
  1758. map_vector_to_txq(adapter, 0, 0);
  1759. DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
  1760. }
  1761. /**
  1762. * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
  1763. * @adapter: board private structure
  1764. *
  1765. * Configure the Tx unit of the MAC after a reset.
  1766. **/
  1767. static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
  1768. {
  1769. u64 tdba;
  1770. struct ixgbe_hw *hw = &adapter->hw;
  1771. u32 i, j, tdlen, txctrl;
  1772. /* Setup the HW Tx Head and Tail descriptor pointers */
  1773. for (i = 0; i < adapter->num_tx_queues; i++) {
  1774. struct ixgbe_ring *ring = adapter->tx_ring[i];
  1775. j = ring->reg_idx;
  1776. tdba = ring->dma;
  1777. tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
  1778. IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
  1779. (tdba & DMA_BIT_MASK(32)));
  1780. IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
  1781. IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
  1782. IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
  1783. IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
  1784. adapter->tx_ring[i]->head = IXGBE_TDH(j);
  1785. adapter->tx_ring[i]->tail = IXGBE_TDT(j);
  1786. /*
  1787. * Disable Tx Head Writeback RO bit, since this hoses
  1788. * bookkeeping if things aren't delivered in order.
  1789. */
  1790. switch (hw->mac.type) {
  1791. case ixgbe_mac_82598EB:
  1792. txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
  1793. break;
  1794. case ixgbe_mac_82599EB:
  1795. default:
  1796. txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
  1797. break;
  1798. }
  1799. txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
  1800. switch (hw->mac.type) {
  1801. case ixgbe_mac_82598EB:
  1802. IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
  1803. break;
  1804. case ixgbe_mac_82599EB:
  1805. default:
  1806. IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
  1807. break;
  1808. }
  1809. }
  1810. if (hw->mac.type == ixgbe_mac_82599EB) {
  1811. u32 rttdcs;
  1812. u32 mask;
  1813. /* disable the arbiter while setting MTQC */
  1814. rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
  1815. rttdcs |= IXGBE_RTTDCS_ARBDIS;
  1816. IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
  1817. /* set transmit pool layout */
  1818. mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
  1819. switch (adapter->flags & mask) {
  1820. case (IXGBE_FLAG_SRIOV_ENABLED):
  1821. IXGBE_WRITE_REG(hw, IXGBE_MTQC,
  1822. (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
  1823. break;
  1824. case (IXGBE_FLAG_DCB_ENABLED):
  1825. /* We enable 8 traffic classes, DCB only */
  1826. IXGBE_WRITE_REG(hw, IXGBE_MTQC,
  1827. (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
  1828. break;
  1829. default:
  1830. IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
  1831. break;
  1832. }
1833. /* re-enable the arbiter */
  1834. rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
  1835. IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
  1836. }
  1837. }
  1838. #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
  1839. static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
  1840. struct ixgbe_ring *rx_ring)
  1841. {
  1842. u32 srrctl;
  1843. int index;
  1844. struct ixgbe_ring_feature *feature = adapter->ring_feature;
  1845. index = rx_ring->reg_idx;
  1846. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  1847. unsigned long mask;
  1848. mask = (unsigned long) feature[RING_F_RSS].mask;
  1849. index = index & mask;
  1850. }
  1851. srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
  1852. srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
  1853. srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
  1854. srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
  1855. IXGBE_SRRCTL_BSIZEHDR_MASK;
  1856. if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
  1857. #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
  1858. srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
  1859. #else
  1860. srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
  1861. #endif
  1862. srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
  1863. } else {
  1864. srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
  1865. IXGBE_SRRCTL_BSIZEPKT_SHIFT;
  1866. srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
  1867. }
  1868. IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
  1869. }
  1870. static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
  1871. {
  1872. u32 mrqc = 0;
  1873. int mask;
  1874. if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
  1875. return mrqc;
  1876. mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
  1877. #ifdef CONFIG_IXGBE_DCB
  1878. | IXGBE_FLAG_DCB_ENABLED
  1879. #endif
  1880. | IXGBE_FLAG_SRIOV_ENABLED
  1881. );
  1882. switch (mask) {
  1883. case (IXGBE_FLAG_RSS_ENABLED):
  1884. mrqc = IXGBE_MRQC_RSSEN;
  1885. break;
  1886. case (IXGBE_FLAG_SRIOV_ENABLED):
  1887. mrqc = IXGBE_MRQC_VMDQEN;
  1888. break;
  1889. #ifdef CONFIG_IXGBE_DCB
  1890. case (IXGBE_FLAG_DCB_ENABLED):
  1891. mrqc = IXGBE_MRQC_RT8TCEN;
  1892. break;
  1893. #endif /* CONFIG_IXGBE_DCB */
  1894. default:
  1895. break;
  1896. }
  1897. return mrqc;
  1898. }
  1899. /**
  1900. * ixgbe_configure_rscctl - enable RSC for the indicated ring
  1901. * @adapter: address of board private structure
  1902. * @index: index of ring to set
  1903. **/
  1904. static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
  1905. {
  1906. struct ixgbe_ring *rx_ring;
  1907. struct ixgbe_hw *hw = &adapter->hw;
  1908. int j;
  1909. u32 rscctrl;
  1910. int rx_buf_len;
  1911. rx_ring = adapter->rx_ring[index];
  1912. j = rx_ring->reg_idx;
  1913. rx_buf_len = rx_ring->rx_buf_len;
  1914. rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
  1915. rscctrl |= IXGBE_RSCCTL_RSCEN;
  1916. /*
  1917. * we must limit the number of descriptors so that the
  1918. * total size of max desc * buf_len is not greater
  1919. * than 65535
  1920. */
  1921. if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
  1922. #if (MAX_SKB_FRAGS > 16)
  1923. rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
  1924. #elif (MAX_SKB_FRAGS > 8)
  1925. rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
  1926. #elif (MAX_SKB_FRAGS > 4)
  1927. rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
  1928. #else
  1929. rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
  1930. #endif
  1931. } else {
  1932. if (rx_buf_len < IXGBE_RXBUFFER_4096)
  1933. rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
  1934. else if (rx_buf_len < IXGBE_RXBUFFER_8192)
  1935. rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
  1936. else
  1937. rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
  1938. }
  1939. IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
  1940. }
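/*
 * Example of the 65535-byte RSC limit above (illustrative): in single-buffer
 * mode with a 4 KB rx_buf_len, 16 descriptors would allow 16 * 4096 = 65536
 * bytes and overflow the limit, so the code steps down to MAXDESC_8
 * (8 * 4096 = 32768); buffers smaller than 4 KB can safely use MAXDESC_16.
 */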
  1941. /**
  1942. * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  1943. * @adapter: board private structure
  1944. *
  1945. * Configure the Rx unit of the MAC after a reset.
  1946. **/
  1947. static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
  1948. {
  1949. u64 rdba;
  1950. struct ixgbe_hw *hw = &adapter->hw;
  1951. struct ixgbe_ring *rx_ring;
  1952. struct net_device *netdev = adapter->netdev;
  1953. int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
  1954. int i, j;
  1955. u32 rdlen, rxctrl, rxcsum;
  1956. static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
  1957. 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
  1958. 0x6A3E67EA, 0x14364D17, 0x3BED200D};
  1959. u32 fctrl, hlreg0;
  1960. u32 reta = 0, mrqc = 0;
  1961. u32 rdrxctl;
  1962. int rx_buf_len;
  1963. /* Decide whether to use packet split mode or not */
  1964. /* Do not use packet split if we're in SR-IOV Mode */
  1965. if (!adapter->num_vfs)
  1966. adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
  1967. /* Set the RX buffer length according to the mode */
  1968. if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
  1969. rx_buf_len = IXGBE_RX_HDR_SIZE;
  1970. if (hw->mac.type == ixgbe_mac_82599EB) {
  1971. /* PSRTYPE must be initialized in 82599 */
  1972. u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
  1973. IXGBE_PSRTYPE_UDPHDR |
  1974. IXGBE_PSRTYPE_IPV4HDR |
  1975. IXGBE_PSRTYPE_IPV6HDR |
  1976. IXGBE_PSRTYPE_L2HDR;
  1977. IXGBE_WRITE_REG(hw,
  1978. IXGBE_PSRTYPE(adapter->num_vfs),
  1979. psrtype);
  1980. }
  1981. } else {
  1982. if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
  1983. (netdev->mtu <= ETH_DATA_LEN))
  1984. rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
  1985. else
  1986. rx_buf_len = ALIGN(max_frame, 1024);
  1987. }
  1988. fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
  1989. fctrl |= IXGBE_FCTRL_BAM;
  1990. fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
  1991. fctrl |= IXGBE_FCTRL_PMCF;
  1992. IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
  1993. hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
  1994. if (adapter->netdev->mtu <= ETH_DATA_LEN)
  1995. hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
  1996. else
  1997. hlreg0 |= IXGBE_HLREG0_JUMBOEN;
  1998. #ifdef IXGBE_FCOE
  1999. if (netdev->features & NETIF_F_FCOE_MTU)
  2000. hlreg0 |= IXGBE_HLREG0_JUMBOEN;
  2001. #endif
  2002. IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
  2003. rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
  2004. /* disable receives while setting up the descriptors */
  2005. rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  2006. IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
  2007. /*
  2008. * Setup the HW Rx Head and Tail Descriptor Pointers and
  2009. * the Base and Length of the Rx Descriptor Ring
  2010. */
  2011. for (i = 0; i < adapter->num_rx_queues; i++) {
  2012. rx_ring = adapter->rx_ring[i];
  2013. rdba = rx_ring->dma;
  2014. j = rx_ring->reg_idx;
  2015. IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
  2016. IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
  2017. IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
  2018. IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
  2019. IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
  2020. rx_ring->head = IXGBE_RDH(j);
  2021. rx_ring->tail = IXGBE_RDT(j);
  2022. rx_ring->rx_buf_len = rx_buf_len;
  2023. if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
  2024. rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
  2025. else
  2026. rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
  2027. #ifdef IXGBE_FCOE
  2028. if (netdev->features & NETIF_F_FCOE_MTU) {
  2029. struct ixgbe_ring_feature *f;
  2030. f = &adapter->ring_feature[RING_F_FCOE];
  2031. if ((i >= f->mask) && (i < f->mask + f->indices)) {
  2032. rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
  2033. if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
  2034. rx_ring->rx_buf_len =
  2035. IXGBE_FCOE_JUMBO_FRAME_SIZE;
  2036. }
  2037. }
  2038. #endif /* IXGBE_FCOE */
  2039. ixgbe_configure_srrctl(adapter, rx_ring);
  2040. }
  2041. if (hw->mac.type == ixgbe_mac_82598EB) {
  2042. /*
  2043. * For VMDq support of different descriptor types or
  2044. * buffer sizes through the use of multiple SRRCTL
  2045. * registers, RDRXCTL.MVMEN must be set to 1
  2046. *
  2047. * also, the manual doesn't mention it clearly but DCA hints
  2048. * will only use queue 0's tags unless this bit is set. Side
  2049. * effects of setting this bit are only that SRRCTL must be
  2050. * fully programmed [0..15]
  2051. */
  2052. rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
  2053. rdrxctl |= IXGBE_RDRXCTL_MVMEN;
  2054. IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
  2055. }
  2056. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  2057. u32 vt_reg_bits;
  2058. u32 reg_offset, vf_shift;
  2059. u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
  2060. vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
  2061. | IXGBE_VT_CTL_REPLEN;
  2062. vt_reg_bits |= (adapter->num_vfs <<
  2063. IXGBE_VT_CTL_POOL_SHIFT);
  2064. IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
  2065. IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
  2066. vf_shift = adapter->num_vfs % 32;
  2067. reg_offset = adapter->num_vfs / 32;
  2068. IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
  2069. IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
  2070. IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
  2071. IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
  2072. /* Enable only the PF's pool for Tx/Rx */
  2073. IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
  2074. IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
  2075. IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
  2076. ixgbe_set_vmolr(hw, adapter->num_vfs);
  2077. }
  2078. /* Program MRQC for the distribution of queues */
  2079. mrqc = ixgbe_setup_mrqc(adapter);
  2080. if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
  2081. /* Fill out redirection table */
  2082. for (i = 0, j = 0; i < 128; i++, j++) {
  2083. if (j == adapter->ring_feature[RING_F_RSS].indices)
  2084. j = 0;
2085. /* each byte of reta is set to (j * 0x11) with j cycling through
2086. * 0..(indices-1); every fourth entry flushes the 4-byte window to a RETA register */
  2087. reta = (reta << 8) | (j * 0x11);
  2088. if ((i & 3) == 3)
  2089. IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
  2090. }
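/*
 * Illustrative RETA contents from the loop above: with 4 RSS indices the
 * bytes cycle 0x00, 0x11, 0x22, 0x33, so the first register written at
 * i = 3 is 0x00112233, and the same 32-bit pattern repeats across all
 * 32 RETA registers (128 entries, 4 per register).
 */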
  2091. /* Fill out hash function seeds */
  2092. for (i = 0; i < 10; i++)
  2093. IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
  2094. if (hw->mac.type == ixgbe_mac_82598EB)
  2095. mrqc |= IXGBE_MRQC_RSSEN;
  2096. /* Perform hash on these packet types */
  2097. mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
  2098. | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
  2099. | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
  2100. | IXGBE_MRQC_RSS_FIELD_IPV6
  2101. | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
  2102. | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
  2103. }
  2104. IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
  2105. if (adapter->num_vfs) {
  2106. u32 reg;
  2107. /* Map PF MAC address in RAR Entry 0 to first pool
  2108. * following VFs */
  2109. hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
  2110. /* Set up VF register offsets for selected VT Mode, i.e.
  2111. * 64 VFs for SR-IOV */
  2112. reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
  2113. reg |= IXGBE_GCR_EXT_SRIOV;
  2114. IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
  2115. }
  2116. rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
  2117. if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
  2118. adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
2119. /* Disable checksum indication in the descriptor; this also
2120. * enables the RSS hash */
  2121. rxcsum |= IXGBE_RXCSUM_PCSD;
  2122. }
  2123. if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
  2124. /* Enable IPv4 payload checksum for UDP fragments
  2125. * if PCSD is not set */
  2126. rxcsum |= IXGBE_RXCSUM_IPPCSE;
  2127. }
  2128. IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
  2129. if (hw->mac.type == ixgbe_mac_82599EB) {
  2130. rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
  2131. rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
  2132. rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
  2133. IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
  2134. }
  2135. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
  2136. /* Enable 82599 HW-RSC */
  2137. for (i = 0; i < adapter->num_rx_queues; i++)
  2138. ixgbe_configure_rscctl(adapter, i);
  2139. /* Disable RSC for ACK packets */
  2140. IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
  2141. (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
  2142. }
  2143. }
  2144. static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
  2145. {
  2146. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  2147. struct ixgbe_hw *hw = &adapter->hw;
  2148. int pool_ndx = adapter->num_vfs;
  2149. /* add VID to filter table */
  2150. hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
  2151. }
  2152. static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
  2153. {
  2154. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  2155. struct ixgbe_hw *hw = &adapter->hw;
  2156. int pool_ndx = adapter->num_vfs;
  2157. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2158. ixgbe_irq_disable(adapter);
  2159. vlan_group_set_device(adapter->vlgrp, vid, NULL);
  2160. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2161. ixgbe_irq_enable(adapter);
  2162. /* remove VID from filter table */
  2163. hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
  2164. }
  2165. static void ixgbe_vlan_rx_register(struct net_device *netdev,
  2166. struct vlan_group *grp)
  2167. {
  2168. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  2169. u32 ctrl;
  2170. int i, j;
  2171. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2172. ixgbe_irq_disable(adapter);
  2173. adapter->vlgrp = grp;
  2174. /*
  2175. * For a DCB driver, always enable VLAN tag stripping so we can
  2176. * still receive traffic from a DCB-enabled host even if we're
  2177. * not in DCB mode.
  2178. */
  2179. ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
  2180. /* Disable CFI check */
  2181. ctrl &= ~IXGBE_VLNCTRL_CFIEN;
  2182. /* enable VLAN tag stripping */
  2183. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  2184. ctrl |= IXGBE_VLNCTRL_VME;
  2185. } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
  2186. for (i = 0; i < adapter->num_rx_queues; i++) {
  2187. u32 ctrl;
  2188. j = adapter->rx_ring[i]->reg_idx;
  2189. ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
  2190. ctrl |= IXGBE_RXDCTL_VME;
  2191. IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
  2192. }
  2193. }
  2194. IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
  2195. ixgbe_vlan_rx_add_vid(netdev, 0);
  2196. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2197. ixgbe_irq_enable(adapter);
  2198. }
  2199. static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
  2200. {
  2201. ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
  2202. if (adapter->vlgrp) {
  2203. u16 vid;
  2204. for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
  2205. if (!vlan_group_get_device(adapter->vlgrp, vid))
  2206. continue;
  2207. ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
  2208. }
  2209. }
  2210. }
  2211. static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
  2212. {
  2213. struct dev_mc_list *mc_ptr;
  2214. u8 *addr = *mc_addr_ptr;
  2215. *vmdq = 0;
  2216. mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
  2217. if (mc_ptr->next)
  2218. *mc_addr_ptr = mc_ptr->next->dmi_addr;
  2219. else
  2220. *mc_addr_ptr = NULL;
  2221. return addr;
  2222. }
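/*
 * Iterator contract for the helper above (descriptive, not new behaviour):
 * update_mc_addr_list() hands it a cursor that points at one dmi_addr;
 * container_of() recovers the enclosing dev_mc_list node, the cursor is
 * advanced to the next node's dmi_addr (or NULL at the end of the list),
 * and the current address is returned to the caller.
 */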
  2223. /**
  2224. * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
  2225. * @netdev: network interface device structure
  2226. *
2227. * The set_rx_mode entry point is called whenever the unicast/multicast
  2228. * address list or the network interface flags are updated. This routine is
  2229. * responsible for configuring the hardware for proper unicast, multicast and
  2230. * promiscuous mode.
  2231. **/
  2232. void ixgbe_set_rx_mode(struct net_device *netdev)
  2233. {
  2234. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  2235. struct ixgbe_hw *hw = &adapter->hw;
  2236. u32 fctrl, vlnctrl;
  2237. u8 *addr_list = NULL;
  2238. int addr_count = 0;
  2239. /* Check for Promiscuous and All Multicast modes */
  2240. fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  2241. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  2242. if (netdev->flags & IFF_PROMISC) {
  2243. hw->addr_ctrl.user_set_promisc = 1;
  2244. fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
  2245. vlnctrl &= ~IXGBE_VLNCTRL_VFE;
  2246. } else {
  2247. if (netdev->flags & IFF_ALLMULTI) {
  2248. fctrl |= IXGBE_FCTRL_MPE;
  2249. fctrl &= ~IXGBE_FCTRL_UPE;
  2250. } else {
  2251. fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
  2252. }
  2253. vlnctrl |= IXGBE_VLNCTRL_VFE;
  2254. hw->addr_ctrl.user_set_promisc = 0;
  2255. }
  2256. IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
  2257. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  2258. /* reprogram secondary unicast list */
  2259. hw->mac.ops.update_uc_addr_list(hw, netdev);
  2260. /* reprogram multicast list */
  2261. addr_count = netdev_mc_count(netdev);
  2262. if (addr_count)
  2263. addr_list = netdev->mc_list->dmi_addr;
  2264. hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
  2265. ixgbe_addr_list_itr);
  2266. if (adapter->num_vfs)
  2267. ixgbe_restore_vf_multicasts(adapter);
  2268. }
  2269. static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
  2270. {
  2271. int q_idx;
  2272. struct ixgbe_q_vector *q_vector;
  2273. int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  2274. /* legacy and MSI only use one vector */
  2275. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  2276. q_vectors = 1;
  2277. for (q_idx = 0; q_idx < q_vectors; q_idx++) {
  2278. struct napi_struct *napi;
  2279. q_vector = adapter->q_vector[q_idx];
  2280. napi = &q_vector->napi;
  2281. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  2282. if (!q_vector->rxr_count || !q_vector->txr_count) {
  2283. if (q_vector->txr_count == 1)
  2284. napi->poll = &ixgbe_clean_txonly;
  2285. else if (q_vector->rxr_count == 1)
  2286. napi->poll = &ixgbe_clean_rxonly;
  2287. }
  2288. }
  2289. napi_enable(napi);
  2290. }
  2291. }
  2292. static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
  2293. {
  2294. int q_idx;
  2295. struct ixgbe_q_vector *q_vector;
  2296. int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  2297. /* legacy and MSI only use one vector */
  2298. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  2299. q_vectors = 1;
  2300. for (q_idx = 0; q_idx < q_vectors; q_idx++) {
  2301. q_vector = adapter->q_vector[q_idx];
  2302. napi_disable(&q_vector->napi);
  2303. }
  2304. }
  2305. #ifdef CONFIG_IXGBE_DCB
  2306. /*
  2307. * ixgbe_configure_dcb - Configure DCB hardware
  2308. * @adapter: ixgbe adapter struct
  2309. *
  2310. * This is called by the driver on open to configure the DCB hardware.
  2311. * This is also called by the gennetlink interface when reconfiguring
  2312. * the DCB state.
  2313. */
  2314. static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
  2315. {
  2316. struct ixgbe_hw *hw = &adapter->hw;
  2317. u32 txdctl, vlnctrl;
  2318. int i, j;
  2319. ixgbe_dcb_check_config(&adapter->dcb_cfg);
  2320. ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
  2321. ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
  2322. /* reconfigure the hardware */
  2323. ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
  2324. for (i = 0; i < adapter->num_tx_queues; i++) {
  2325. j = adapter->tx_ring[i]->reg_idx;
  2326. txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
  2327. /* PThresh workaround for Tx hang with DFP enabled. */
  2328. txdctl |= 32;
  2329. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
  2330. }
  2331. /* Enable VLAN tag insert/strip */
  2332. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  2333. if (hw->mac.type == ixgbe_mac_82598EB) {
  2334. vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
  2335. vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
  2336. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  2337. } else if (hw->mac.type == ixgbe_mac_82599EB) {
  2338. vlnctrl |= IXGBE_VLNCTRL_VFE;
  2339. vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
  2340. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  2341. for (i = 0; i < adapter->num_rx_queues; i++) {
  2342. j = adapter->rx_ring[i]->reg_idx;
  2343. vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
  2344. vlnctrl |= IXGBE_RXDCTL_VME;
  2345. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
  2346. }
  2347. }
  2348. hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
  2349. }
  2350. #endif
  2351. static void ixgbe_configure(struct ixgbe_adapter *adapter)
  2352. {
  2353. struct net_device *netdev = adapter->netdev;
  2354. struct ixgbe_hw *hw = &adapter->hw;
  2355. int i;
  2356. ixgbe_set_rx_mode(netdev);
  2357. ixgbe_restore_vlan(adapter);
  2358. #ifdef CONFIG_IXGBE_DCB
  2359. if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
  2360. if (hw->mac.type == ixgbe_mac_82598EB)
  2361. netif_set_gso_max_size(netdev, 32768);
  2362. else
  2363. netif_set_gso_max_size(netdev, 65536);
  2364. ixgbe_configure_dcb(adapter);
  2365. } else {
  2366. netif_set_gso_max_size(netdev, 65536);
  2367. }
  2368. #else
  2369. netif_set_gso_max_size(netdev, 65536);
  2370. #endif
  2371. #ifdef IXGBE_FCOE
  2372. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
  2373. ixgbe_configure_fcoe(adapter);
  2374. #endif /* IXGBE_FCOE */
  2375. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
  2376. for (i = 0; i < adapter->num_tx_queues; i++)
  2377. adapter->tx_ring[i]->atr_sample_rate =
  2378. adapter->atr_sample_rate;
  2379. ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
  2380. } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
  2381. ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
  2382. }
  2383. ixgbe_configure_tx(adapter);
  2384. ixgbe_configure_rx(adapter);
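/*
 * Post count - 1 buffers rather than the full ring: leaving one
 * descriptor unused is the usual head/tail ring convention, so a
 * completely full ring is never indistinguishable from an empty one.
 */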
  2385. for (i = 0; i < adapter->num_rx_queues; i++)
  2386. ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
  2387. (adapter->rx_ring[i]->count - 1));
  2388. }
  2389. static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
  2390. {
  2391. switch (hw->phy.type) {
  2392. case ixgbe_phy_sfp_avago:
  2393. case ixgbe_phy_sfp_ftl:
  2394. case ixgbe_phy_sfp_intel:
  2395. case ixgbe_phy_sfp_unknown:
  2396. case ixgbe_phy_tw_tyco:
  2397. case ixgbe_phy_tw_unknown:
  2398. return true;
  2399. default:
  2400. return false;
  2401. }
  2402. }
  2403. /**
  2404. * ixgbe_sfp_link_config - set up SFP+ link
  2405. * @adapter: pointer to private adapter struct
  2406. **/
  2407. static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
  2408. {
  2409. struct ixgbe_hw *hw = &adapter->hw;
  2410. if (hw->phy.multispeed_fiber) {
  2411. /*
  2412. * In multispeed fiber setups, the device may not have
  2413. * had a physical connection when the driver loaded.
  2414. * If that's the case, the initial link configuration
  2415. * couldn't get the MAC into 10G or 1G mode, so we'll
  2416. * never have a link status change interrupt fire.
2417. * We need to try to force an autonegotiation
  2418. * session, then bring up link.
  2419. */
  2420. hw->mac.ops.setup_sfp(hw);
  2421. if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
  2422. schedule_work(&adapter->multispeed_fiber_task);
  2423. } else {
  2424. /*
  2425. * Direct Attach Cu and non-multispeed fiber modules
  2426. * still need to be configured properly prior to
  2427. * attempting link.
  2428. */
  2429. if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
  2430. schedule_work(&adapter->sfp_config_module_task);
  2431. }
  2432. }
  2433. /**
  2434. * ixgbe_non_sfp_link_config - set up non-SFP+ link
  2435. * @hw: pointer to private hardware struct
  2436. *
  2437. * Returns 0 on success, negative on failure
  2438. **/
  2439. static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
  2440. {
  2441. u32 autoneg;
  2442. bool negotiation, link_up = false;
  2443. u32 ret = IXGBE_ERR_LINK_SETUP;
  2444. if (hw->mac.ops.check_link)
  2445. ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
  2446. if (ret)
  2447. goto link_cfg_out;
  2448. if (hw->mac.ops.get_link_capabilities)
  2449. ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
  2450. if (ret)
  2451. goto link_cfg_out;
  2452. if (hw->mac.ops.setup_link)
  2453. ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
  2454. link_cfg_out:
  2455. return ret;
  2456. }
  2457. #define IXGBE_MAX_RX_DESC_POLL 10
  2458. static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
  2459. int rxr)
  2460. {
  2461. int j = adapter->rx_ring[rxr]->reg_idx;
  2462. int k;
  2463. for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
  2464. if (IXGBE_READ_REG(&adapter->hw,
  2465. IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
  2466. break;
  2467. else
  2468. msleep(1);
  2469. }
  2470. if (k >= IXGBE_MAX_RX_DESC_POLL) {
  2471. DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
  2472. "not set within the polling period\n", rxr);
  2473. }
  2474. ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
  2475. (adapter->rx_ring[rxr]->count - 1));
  2476. }
  2477. static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
  2478. {
  2479. struct net_device *netdev = adapter->netdev;
  2480. struct ixgbe_hw *hw = &adapter->hw;
  2481. int i, j = 0;
  2482. int num_rx_rings = adapter->num_rx_queues;
  2483. int err;
  2484. int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
  2485. u32 txdctl, rxdctl, mhadd;
  2486. u32 dmatxctl;
  2487. u32 gpie;
  2488. u32 ctrl_ext;
  2489. ixgbe_get_hw_control(adapter);
  2490. if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
  2491. (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
  2492. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  2493. gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
  2494. IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
  2495. } else {
  2496. /* MSI only */
  2497. gpie = 0;
  2498. }
  2499. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  2500. gpie &= ~IXGBE_GPIE_VTMODE_MASK;
  2501. gpie |= IXGBE_GPIE_VTMODE_64;
  2502. }
  2503. /* XXX: to interrupt immediately for EICS writes, enable this */
  2504. /* gpie |= IXGBE_GPIE_EIMEN; */
  2505. IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
  2506. }
  2507. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  2508. /*
  2509. * use EIAM to auto-mask when MSI-X interrupt is asserted
  2510. * this saves a register write for every interrupt
  2511. */
  2512. switch (hw->mac.type) {
  2513. case ixgbe_mac_82598EB:
  2514. IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
  2515. break;
  2516. default:
  2517. case ixgbe_mac_82599EB:
  2518. IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
  2519. IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
  2520. break;
  2521. }
  2522. } else {
  2523. /* legacy interrupts, use EIAM to auto-mask when reading EICR,
  2524. * specifically only auto mask tx and rx interrupts */
  2525. IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
  2526. }
  2527. /* Enable fan failure interrupt if media type is copper */
  2528. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
  2529. gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
  2530. gpie |= IXGBE_SDP1_GPIEN;
  2531. IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
  2532. }
  2533. if (hw->mac.type == ixgbe_mac_82599EB) {
  2534. gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
  2535. gpie |= IXGBE_SDP1_GPIEN;
  2536. gpie |= IXGBE_SDP2_GPIEN;
  2537. IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
  2538. }
  2539. #ifdef IXGBE_FCOE
  2540. /* adjust max frame to be able to do baby jumbo for FCoE */
  2541. if ((netdev->features & NETIF_F_FCOE_MTU) &&
  2542. (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
  2543. max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
  2544. #endif /* IXGBE_FCOE */
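/*
 * The maximum frame size (MFS) field occupies the upper 16 bits of MHADD,
 * hence the shift when comparing and programming it.  For example, with the
 * default MTU of 1500 the value written is 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) = 1518 bytes (larger when FCoE baby jumbo applies above).
 */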
  2545. mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
  2546. if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
  2547. mhadd &= ~IXGBE_MHADD_MFS_MASK;
  2548. mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
  2549. IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
  2550. }
  2551. for (i = 0; i < adapter->num_tx_queues; i++) {
  2552. j = adapter->tx_ring[i]->reg_idx;
  2553. txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
  2554. /* enable WTHRESH=8 descriptors, to encourage burst writeback */
  2555. txdctl |= (8 << 16);
  2556. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
  2557. }
  2558. if (hw->mac.type == ixgbe_mac_82599EB) {
  2559. /* DMATXCTL.EN must be set after all Tx queue config is done */
  2560. dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
  2561. dmatxctl |= IXGBE_DMATXCTL_TE;
  2562. IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
  2563. }
  2564. for (i = 0; i < adapter->num_tx_queues; i++) {
  2565. j = adapter->tx_ring[i]->reg_idx;
  2566. txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
  2567. txdctl |= IXGBE_TXDCTL_ENABLE;
  2568. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
  2569. if (hw->mac.type == ixgbe_mac_82599EB) {
  2570. int wait_loop = 10;
  2571. /* poll for Tx Enable ready */
  2572. do {
  2573. msleep(1);
  2574. txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
  2575. } while (--wait_loop &&
  2576. !(txdctl & IXGBE_TXDCTL_ENABLE));
  2577. if (!wait_loop)
  2578. DPRINTK(DRV, ERR, "Could not enable "
  2579. "Tx Queue %d\n", j);
  2580. }
  2581. }
  2582. for (i = 0; i < num_rx_rings; i++) {
  2583. j = adapter->rx_ring[i]->reg_idx;
  2584. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
  2585. /* enable PTHRESH=32 descriptors (half the internal cache)
  2586. * and HTHRESH=0 descriptors (to minimize latency on fetch),
  2587. * this also removes a pesky rx_no_buffer_count increment */
  2588. rxdctl |= 0x0020;
  2589. rxdctl |= IXGBE_RXDCTL_ENABLE;
  2590. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
  2591. if (hw->mac.type == ixgbe_mac_82599EB)
  2592. ixgbe_rx_desc_queue_enable(adapter, i);
  2593. }
  2594. /* enable all receives */
  2595. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  2596. if (hw->mac.type == ixgbe_mac_82598EB)
  2597. rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
  2598. else
  2599. rxdctl |= IXGBE_RXCTRL_RXEN;
  2600. hw->mac.ops.enable_rx_dma(hw, rxdctl);
  2601. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
  2602. ixgbe_configure_msix(adapter);
  2603. else
  2604. ixgbe_configure_msi_and_legacy(adapter);
  2605. clear_bit(__IXGBE_DOWN, &adapter->state);
  2606. ixgbe_napi_enable_all(adapter);
  2607. /* clear any pending interrupts, may auto mask */
  2608. IXGBE_READ_REG(hw, IXGBE_EICR);
  2609. ixgbe_irq_enable(adapter);
  2610. /*
  2611. * If this adapter has a fan, check to see if we had a failure
  2612. * before we enabled the interrupt.
  2613. */
  2614. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
  2615. u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  2616. if (esdp & IXGBE_ESDP_SDP1)
  2617. DPRINTK(DRV, CRIT,
  2618. "Fan has stopped, replace the adapter\n");
  2619. }
  2620. /*
  2621. * For hot-pluggable SFP+ devices, a new SFP+ module may have
  2622. * arrived before interrupts were enabled but after probe. Such
  2623. * devices wouldn't have their type identified yet. We need to
  2624. * kick off the SFP+ module setup first, then try to bring up link.
  2625. * If we're not hot-pluggable SFP+, we just need to configure link
  2626. * and bring it up.
  2627. */
  2628. if (hw->phy.type == ixgbe_phy_unknown) {
  2629. err = hw->phy.ops.identify(hw);
  2630. if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
  2631. /*
  2632. * Take the device down and schedule the sfp tasklet
  2633. * which will unregister_netdev and log it.
  2634. */
  2635. ixgbe_down(adapter);
  2636. schedule_work(&adapter->sfp_config_module_task);
  2637. return err;
  2638. }
  2639. }
  2640. if (ixgbe_is_sfp(hw)) {
  2641. ixgbe_sfp_link_config(adapter);
  2642. } else {
  2643. err = ixgbe_non_sfp_link_config(hw);
  2644. if (err)
  2645. DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
  2646. }
  2647. for (i = 0; i < adapter->num_tx_queues; i++)
  2648. set_bit(__IXGBE_FDIR_INIT_DONE,
  2649. &(adapter->tx_ring[i]->reinit_state));
  2650. /* enable transmits */
  2651. netif_tx_start_all_queues(netdev);
  2652. /* bring the link up in the watchdog, this could race with our first
  2653. * link up interrupt but shouldn't be a problem */
  2654. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  2655. adapter->link_check_timeout = jiffies;
  2656. mod_timer(&adapter->watchdog_timer, jiffies);
  2657. /* Set PF Reset Done bit so PF/VF Mail Ops can work */
  2658. ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
  2659. ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
  2660. IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
  2661. return 0;
  2662. }
  2663. void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
  2664. {
  2665. WARN_ON(in_interrupt());
  2666. while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
  2667. msleep(1);
  2668. ixgbe_down(adapter);
  2669. ixgbe_up(adapter);
  2670. clear_bit(__IXGBE_RESETTING, &adapter->state);
  2671. }
  2672. int ixgbe_up(struct ixgbe_adapter *adapter)
  2673. {
  2674. /* hardware has been reset, we need to reload some things */
  2675. ixgbe_configure(adapter);
  2676. return ixgbe_up_complete(adapter);
  2677. }
  2678. void ixgbe_reset(struct ixgbe_adapter *adapter)
  2679. {
  2680. struct ixgbe_hw *hw = &adapter->hw;
  2681. int err;
  2682. err = hw->mac.ops.init_hw(hw);
  2683. switch (err) {
  2684. case 0:
  2685. case IXGBE_ERR_SFP_NOT_PRESENT:
  2686. break;
  2687. case IXGBE_ERR_MASTER_REQUESTS_PENDING:
  2688. dev_err(&adapter->pdev->dev, "master disable timed out\n");
  2689. break;
  2690. case IXGBE_ERR_EEPROM_VERSION:
  2691. /* We are running on a pre-production device, log a warning */
  2692. dev_warn(&adapter->pdev->dev, "This device is a pre-production "
  2693. "adapter/LOM. Please be aware there may be issues "
  2694. "associated with your hardware. If you are "
  2695. "experiencing problems please contact your Intel or "
  2696. "hardware representative who provided you with this "
  2697. "hardware.\n");
  2698. break;
  2699. default:
  2700. dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
  2701. }
  2702. /* reprogram the RAR[0] in case user changed it. */
  2703. hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
  2704. IXGBE_RAH_AV);
  2705. }
  2706. /**
  2707. * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  2708. * @adapter: board private structure
  2709. * @rx_ring: ring to free buffers from
  2710. **/
  2711. static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
  2712. struct ixgbe_ring *rx_ring)
  2713. {
  2714. struct pci_dev *pdev = adapter->pdev;
  2715. unsigned long size;
  2716. unsigned int i;
  2717. /* Free all the Rx ring sk_buffs */
  2718. for (i = 0; i < rx_ring->count; i++) {
  2719. struct ixgbe_rx_buffer *rx_buffer_info;
  2720. rx_buffer_info = &rx_ring->rx_buffer_info[i];
  2721. if (rx_buffer_info->dma) {
  2722. pci_unmap_single(pdev, rx_buffer_info->dma,
  2723. rx_ring->rx_buf_len,
  2724. PCI_DMA_FROMDEVICE);
  2725. rx_buffer_info->dma = 0;
  2726. }
  2727. if (rx_buffer_info->skb) {
  2728. struct sk_buff *skb = rx_buffer_info->skb;
  2729. rx_buffer_info->skb = NULL;
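/*
 * With hardware RSC (receive side coalescing) enabled, several received
 * buffers may have been chained into one logical packet via skb->prev,
 * each with its own DMA mapping stashed in the skb control block; walk
 * the whole chain, unmapping and freeing every piece.
 */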
  2730. do {
  2731. struct sk_buff *this = skb;
  2732. if (IXGBE_RSC_CB(this)->dma)
  2733. pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
  2734. rx_ring->rx_buf_len,
  2735. PCI_DMA_FROMDEVICE);
  2736. skb = skb->prev;
  2737. dev_kfree_skb(this);
  2738. } while (skb);
  2739. }
  2740. if (!rx_buffer_info->page)
  2741. continue;
  2742. if (rx_buffer_info->page_dma) {
  2743. pci_unmap_page(pdev, rx_buffer_info->page_dma,
  2744. PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
  2745. rx_buffer_info->page_dma = 0;
  2746. }
  2747. put_page(rx_buffer_info->page);
  2748. rx_buffer_info->page = NULL;
  2749. rx_buffer_info->page_offset = 0;
  2750. }
  2751. size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
  2752. memset(rx_ring->rx_buffer_info, 0, size);
  2753. /* Zero out the descriptor ring */
  2754. memset(rx_ring->desc, 0, rx_ring->size);
  2755. rx_ring->next_to_clean = 0;
  2756. rx_ring->next_to_use = 0;
  2757. if (rx_ring->head)
  2758. writel(0, adapter->hw.hw_addr + rx_ring->head);
  2759. if (rx_ring->tail)
  2760. writel(0, adapter->hw.hw_addr + rx_ring->tail);
  2761. }
  2762. /**
  2763. * ixgbe_clean_tx_ring - Free Tx Buffers
  2764. * @adapter: board private structure
  2765. * @tx_ring: ring to be cleaned
  2766. **/
  2767. static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
  2768. struct ixgbe_ring *tx_ring)
  2769. {
  2770. struct ixgbe_tx_buffer *tx_buffer_info;
  2771. unsigned long size;
  2772. unsigned int i;
  2773. /* Free all the Tx ring sk_buffs */
  2774. for (i = 0; i < tx_ring->count; i++) {
  2775. tx_buffer_info = &tx_ring->tx_buffer_info[i];
  2776. ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
  2777. }
  2778. size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
  2779. memset(tx_ring->tx_buffer_info, 0, size);
  2780. /* Zero out the descriptor ring */
  2781. memset(tx_ring->desc, 0, tx_ring->size);
  2782. tx_ring->next_to_use = 0;
  2783. tx_ring->next_to_clean = 0;
  2784. if (tx_ring->head)
  2785. writel(0, adapter->hw.hw_addr + tx_ring->head);
  2786. if (tx_ring->tail)
  2787. writel(0, adapter->hw.hw_addr + tx_ring->tail);
  2788. }
  2789. /**
  2790. * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
  2791. * @adapter: board private structure
  2792. **/
  2793. static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
  2794. {
  2795. int i;
  2796. for (i = 0; i < adapter->num_rx_queues; i++)
  2797. ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
  2798. }
  2799. /**
  2800. * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
  2801. * @adapter: board private structure
  2802. **/
  2803. static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
  2804. {
  2805. int i;
  2806. for (i = 0; i < adapter->num_tx_queues; i++)
  2807. ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
  2808. }
  2809. void ixgbe_down(struct ixgbe_adapter *adapter)
  2810. {
  2811. struct net_device *netdev = adapter->netdev;
  2812. struct ixgbe_hw *hw = &adapter->hw;
  2813. u32 rxctrl;
  2814. u32 txdctl;
  2815. int i, j;
  2816. /* signal that we are down to the interrupt handler */
  2817. set_bit(__IXGBE_DOWN, &adapter->state);
  2818. /* disable receive for all VFs and wait one second */
  2819. if (adapter->num_vfs) {
  2820. for (i = 0 ; i < adapter->num_vfs; i++)
  2821. adapter->vfinfo[i].clear_to_send = 0;
  2822. /* ping all the active vfs to let them know we are going down */
  2823. ixgbe_ping_all_vfs(adapter);
  2824. /* Disable all VFTE/VFRE TX/RX */
  2825. ixgbe_disable_tx_rx(adapter);
  2826. }
  2827. /* disable receives */
  2828. rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  2829. IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
  2830. netif_tx_disable(netdev);
  2831. IXGBE_WRITE_FLUSH(hw);
  2832. msleep(10);
  2833. netif_tx_stop_all_queues(netdev);
  2834. ixgbe_irq_disable(adapter);
  2835. ixgbe_napi_disable_all(adapter);
  2836. clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
  2837. del_timer_sync(&adapter->sfp_timer);
  2838. del_timer_sync(&adapter->watchdog_timer);
  2839. cancel_work_sync(&adapter->watchdog_task);
  2840. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
  2841. adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
  2842. cancel_work_sync(&adapter->fdir_reinit_task);
  2843. /* disable transmits in the hardware now that interrupts are off */
  2844. for (i = 0; i < adapter->num_tx_queues; i++) {
  2845. j = adapter->tx_ring[i]->reg_idx;
  2846. txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
  2847. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
  2848. (txdctl & ~IXGBE_TXDCTL_ENABLE));
  2849. }
  2850. /* Disable the Tx DMA engine on 82599 */
  2851. if (hw->mac.type == ixgbe_mac_82599EB)
  2852. IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
  2853. (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
  2854. ~IXGBE_DMATXCTL_TE));
  2855. netif_carrier_off(netdev);
  2856. /* clear n-tuple filters that are cached */
  2857. ethtool_ntuple_flush(netdev);
  2858. if (!pci_channel_offline(adapter->pdev))
  2859. ixgbe_reset(adapter);
  2860. ixgbe_clean_all_tx_rings(adapter);
  2861. ixgbe_clean_all_rx_rings(adapter);
  2862. #ifdef CONFIG_IXGBE_DCA
  2863. /* since we reset the hardware DCA settings were cleared */
  2864. ixgbe_setup_dca(adapter);
  2865. #endif
  2866. }
  2867. /**
  2868. * ixgbe_poll - NAPI Rx polling callback
  2869. * @napi: structure for representing this polling device
  2870. * @budget: how many packets driver is allowed to clean
  2871. *
2872. * This function is used for legacy and MSI interrupts, in NAPI mode
  2873. **/
  2874. static int ixgbe_poll(struct napi_struct *napi, int budget)
  2875. {
  2876. struct ixgbe_q_vector *q_vector =
  2877. container_of(napi, struct ixgbe_q_vector, napi);
  2878. struct ixgbe_adapter *adapter = q_vector->adapter;
  2879. int tx_clean_complete, work_done = 0;
  2880. #ifdef CONFIG_IXGBE_DCA
  2881. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
  2882. ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
  2883. ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
  2884. }
  2885. #endif
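/*
 * In legacy/MSI mode the adapter runs with exactly one Tx and one Rx
 * ring, so this poll routine only ever services ring 0 of each.
 */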
  2886. tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
  2887. ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
  2888. if (!tx_clean_complete)
  2889. work_done = budget;
  2890. /* If budget not fully consumed, exit the polling mode */
  2891. if (work_done < budget) {
  2892. napi_complete(napi);
  2893. if (adapter->rx_itr_setting & 1)
  2894. ixgbe_set_itr(adapter);
  2895. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2896. ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
  2897. }
  2898. return work_done;
  2899. }
  2900. /**
  2901. * ixgbe_tx_timeout - Respond to a Tx Hang
  2902. * @netdev: network interface device structure
  2903. **/
  2904. static void ixgbe_tx_timeout(struct net_device *netdev)
  2905. {
  2906. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  2907. /* Do the reset outside of interrupt context */
  2908. schedule_work(&adapter->reset_task);
  2909. }
  2910. static void ixgbe_reset_task(struct work_struct *work)
  2911. {
  2912. struct ixgbe_adapter *adapter;
  2913. adapter = container_of(work, struct ixgbe_adapter, reset_task);
  2914. /* If we're already down or resetting, just bail */
  2915. if (test_bit(__IXGBE_DOWN, &adapter->state) ||
  2916. test_bit(__IXGBE_RESETTING, &adapter->state))
  2917. return;
  2918. adapter->tx_timeout_count++;
  2919. ixgbe_reinit_locked(adapter);
  2920. }
  2921. #ifdef CONFIG_IXGBE_DCB
  2922. static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
  2923. {
  2924. bool ret = false;
  2925. struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
  2926. if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
  2927. return ret;
  2928. f->mask = 0x7 << 3;
  2929. adapter->num_rx_queues = f->indices;
  2930. adapter->num_tx_queues = f->indices;
  2931. ret = true;
  2932. return ret;
  2933. }
  2934. #endif
  2935. /**
  2936. * ixgbe_set_rss_queues: Allocate queues for RSS
  2937. * @adapter: board private structure to initialize
  2938. *
  2939. * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
  2940. * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
  2941. *
  2942. **/
  2943. static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  2944. {
  2945. bool ret = false;
  2946. struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
  2947. if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
  2948. f->mask = 0xF;
  2949. adapter->num_rx_queues = f->indices;
  2950. adapter->num_tx_queues = f->indices;
  2951. ret = true;
  2952. } else {
  2953. ret = false;
  2954. }
  2955. return ret;
  2956. }
  2957. /**
  2958. * ixgbe_set_fdir_queues: Allocate queues for Flow Director
  2959. * @adapter: board private structure to initialize
  2960. *
  2961. * Flow Director is an advanced Rx filter, attempting to get Rx flows back
  2962. * to the original CPU that initiated the Tx session. This runs in addition
  2963. * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
  2964. * Rx load across CPUs using RSS.
  2965. *
  2966. **/
2967. static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
  2968. {
  2969. bool ret = false;
  2970. struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
  2971. f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
  2972. f_fdir->mask = 0;
  2973. /* Flow Director must have RSS enabled */
2974. if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
2975. ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
2976. (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
  2977. adapter->num_tx_queues = f_fdir->indices;
  2978. adapter->num_rx_queues = f_fdir->indices;
  2979. ret = true;
  2980. } else {
  2981. adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  2982. adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
  2983. }
  2984. return ret;
  2985. }
  2986. #ifdef IXGBE_FCOE
  2987. /**
  2988. * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
  2989. * @adapter: board private structure to initialize
  2990. *
  2991. * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
  2992. * The ring feature mask is not used as a mask for FCoE, as it can take any 8
2993. * rx queues out of the max number of rx queues; instead, it is used as the
  2994. * index of the first rx queue used by FCoE.
  2995. *
  2996. **/
  2997. static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
  2998. {
  2999. bool ret = false;
  3000. struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
  3001. f->indices = min((int)num_online_cpus(), f->indices);
  3002. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  3003. adapter->num_rx_queues = 1;
  3004. adapter->num_tx_queues = 1;
  3005. #ifdef CONFIG_IXGBE_DCB
  3006. if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3007. DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
  3008. ixgbe_set_dcb_queues(adapter);
  3009. }
  3010. #endif
  3011. if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3012. DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
  3013. if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
  3014. (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
  3015. ixgbe_set_fdir_queues(adapter);
  3016. else
  3017. ixgbe_set_rss_queues(adapter);
  3018. }
  3019. /* adding FCoE rx rings to the end */
  3020. f->mask = adapter->num_rx_queues;
  3021. adapter->num_rx_queues += f->indices;
  3022. adapter->num_tx_queues += f->indices;
  3023. ret = true;
  3024. }
  3025. return ret;
  3026. }
  3027. #endif /* IXGBE_FCOE */
  3028. /**
  3029. * ixgbe_set_sriov_queues: Allocate queues for IOV use
  3030. * @adapter: board private structure to initialize
  3031. *
  3032. * IOV doesn't actually use anything, so just NAK the
  3033. * request for now and let the other queue routines
  3034. * figure out what to do.
  3035. */
  3036. static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
  3037. {
  3038. return false;
  3039. }
3040. /**
3041. * ixgbe_set_num_queues: Allocate queues for device, feature dependent
  3042. * @adapter: board private structure to initialize
  3043. *
  3044. * This is the top level queue allocation routine. The order here is very
  3045. * important, starting with the "most" number of features turned on at once,
  3046. * and ending with the smallest set of features. This way large combinations
  3047. * can be allocated if they're turned on, and smaller combinations are the
  3048. * fallthrough conditions.
  3049. *
  3050. **/
  3051. static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
  3052. {
  3053. /* Start with base case */
  3054. adapter->num_rx_queues = 1;
  3055. adapter->num_tx_queues = 1;
  3056. adapter->num_rx_pools = adapter->num_rx_queues;
  3057. adapter->num_rx_queues_per_pool = 1;
  3058. if (ixgbe_set_sriov_queues(adapter))
  3059. return;
  3060. #ifdef IXGBE_FCOE
  3061. if (ixgbe_set_fcoe_queues(adapter))
  3062. goto done;
  3063. #endif /* IXGBE_FCOE */
  3064. #ifdef CONFIG_IXGBE_DCB
  3065. if (ixgbe_set_dcb_queues(adapter))
  3066. goto done;
  3067. #endif
  3068. if (ixgbe_set_fdir_queues(adapter))
  3069. goto done;
  3070. if (ixgbe_set_rss_queues(adapter))
  3071. goto done;
  3072. /* fallback to base case */
  3073. adapter->num_rx_queues = 1;
  3074. adapter->num_tx_queues = 1;
  3075. done:
  3076. /* Notify the stack of the (possibly) reduced Tx Queue count. */
  3077. adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
  3078. }
  3079. static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
  3080. int vectors)
  3081. {
  3082. int err, vector_threshold;
  3083. /* We'll want at least 3 (vector_threshold):
  3084. * 1) TxQ[0] Cleanup
  3085. * 2) RxQ[0] Cleanup
  3086. * 3) Other (Link Status Change, etc.)
  3087. * 4) TCP Timer (optional)
  3088. */
  3089. vector_threshold = MIN_MSIX_COUNT;
  3090. /* The more we get, the more we will assign to Tx/Rx Cleanup
  3091. * for the separate queues...where Rx Cleanup >= Tx Cleanup.
  3092. * Right now, we simply care about how many we'll get; we'll
  3093. * set them up later while requesting irq's.
  3094. */
  3095. while (vectors >= vector_threshold) {
  3096. err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
  3097. vectors);
  3098. if (!err) /* Success in acquiring all requested vectors. */
  3099. break;
  3100. else if (err < 0)
  3101. vectors = 0; /* Nasty failure, quit now */
  3102. else /* err == number of vectors we should try again with */
  3103. vectors = err;
  3104. }
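/*
 * pci_enable_msix() returns 0 on success, a negative errno on a hard
 * failure, or a positive count of vectors actually available.  Example:
 * asking for 10 vectors when only 6 can be granted returns 6, and the
 * loop above simply retries the request with 6.
 */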
  3105. if (vectors < vector_threshold) {
  3106. /* Can't allocate enough MSI-X interrupts? Oh well.
  3107. * This just means we'll go with either a single MSI
  3108. * vector or fall back to legacy interrupts.
  3109. */
  3110. DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
  3111. adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
  3112. kfree(adapter->msix_entries);
  3113. adapter->msix_entries = NULL;
  3114. } else {
  3115. adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
  3116. /*
  3117. * Adjust for only the vectors we'll use, which is minimum
  3118. * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
  3119. * vectors we were allocated.
  3120. */
  3121. adapter->num_msix_vectors = min(vectors,
  3122. adapter->max_msix_q_vectors + NON_Q_VECTORS);
  3123. }
  3124. }
  3125. /**
  3126. * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
  3127. * @adapter: board private structure to initialize
  3128. *
  3129. * Cache the descriptor ring offsets for RSS to the assigned rings.
  3130. *
  3131. **/
  3132. static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
  3133. {
  3134. int i;
  3135. bool ret = false;
  3136. if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
  3137. for (i = 0; i < adapter->num_rx_queues; i++)
  3138. adapter->rx_ring[i]->reg_idx = i;
  3139. for (i = 0; i < adapter->num_tx_queues; i++)
  3140. adapter->tx_ring[i]->reg_idx = i;
  3141. ret = true;
  3142. } else {
  3143. ret = false;
  3144. }
  3145. return ret;
  3146. }
  3147. #ifdef CONFIG_IXGBE_DCB
  3148. /**
  3149. * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
  3150. * @adapter: board private structure to initialize
  3151. *
  3152. * Cache the descriptor ring offsets for DCB to the assigned rings.
  3153. *
  3154. **/
  3155. static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
  3156. {
  3157. int i;
  3158. bool ret = false;
  3159. int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
  3160. if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
  3161. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  3162. /* the number of queues is assumed to be symmetric */
  3163. for (i = 0; i < dcb_i; i++) {
  3164. adapter->rx_ring[i]->reg_idx = i << 3;
  3165. adapter->tx_ring[i]->reg_idx = i << 2;
  3166. }
  3167. ret = true;
  3168. } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
  3169. if (dcb_i == 8) {
  3170. /*
  3171. * Tx TC0 starts at: descriptor queue 0
  3172. * Tx TC1 starts at: descriptor queue 32
  3173. * Tx TC2 starts at: descriptor queue 64
  3174. * Tx TC3 starts at: descriptor queue 80
  3175. * Tx TC4 starts at: descriptor queue 96
  3176. * Tx TC5 starts at: descriptor queue 104
  3177. * Tx TC6 starts at: descriptor queue 112
  3178. * Tx TC7 starts at: descriptor queue 120
  3179. *
  3180. * Rx TC0-TC7 are offset by 16 queues each
  3181. */
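/*
 * Checking the shifts below against the table above (8-TC case):
 * Tx: i << 5 gives 0/32/64 for TC0-TC2, (i + 2) << 4 gives 80/96 for
 * TC3-TC4, and (i + 8) << 3 gives 104/112/120 for TC5-TC7.
 * Rx: i << 4 spaces each TC 16 queues apart (0, 16, 32, ...).
 */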
  3182. for (i = 0; i < 3; i++) {
  3183. adapter->tx_ring[i]->reg_idx = i << 5;
  3184. adapter->rx_ring[i]->reg_idx = i << 4;
  3185. }
  3186. for ( ; i < 5; i++) {
  3187. adapter->tx_ring[i]->reg_idx =
  3188. ((i + 2) << 4);
  3189. adapter->rx_ring[i]->reg_idx = i << 4;
  3190. }
  3191. for ( ; i < dcb_i; i++) {
  3192. adapter->tx_ring[i]->reg_idx =
  3193. ((i + 8) << 3);
  3194. adapter->rx_ring[i]->reg_idx = i << 4;
  3195. }
  3196. ret = true;
  3197. } else if (dcb_i == 4) {
  3198. /*
  3199. * Tx TC0 starts at: descriptor queue 0
  3200. * Tx TC1 starts at: descriptor queue 64
  3201. * Tx TC2 starts at: descriptor queue 96
  3202. * Tx TC3 starts at: descriptor queue 112
  3203. *
  3204. * Rx TC0-TC3 are offset by 32 queues each
  3205. */
  3206. adapter->tx_ring[0]->reg_idx = 0;
  3207. adapter->tx_ring[1]->reg_idx = 64;
  3208. adapter->tx_ring[2]->reg_idx = 96;
  3209. adapter->tx_ring[3]->reg_idx = 112;
  3210. for (i = 0 ; i < dcb_i; i++)
  3211. adapter->rx_ring[i]->reg_idx = i << 5;
  3212. ret = true;
  3213. } else {
  3214. ret = false;
  3215. }
  3216. } else {
  3217. ret = false;
  3218. }
  3219. } else {
  3220. ret = false;
  3221. }
  3222. return ret;
  3223. }
  3224. #endif
  3225. /**
  3226. * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
  3227. * @adapter: board private structure to initialize
  3228. *
  3229. * Cache the descriptor ring offsets for Flow Director to the assigned rings.
  3230. *
  3231. **/
3232. static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
  3233. {
  3234. int i;
  3235. bool ret = false;
  3236. if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
  3237. ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
  3238. (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
  3239. for (i = 0; i < adapter->num_rx_queues; i++)
  3240. adapter->rx_ring[i]->reg_idx = i;
  3241. for (i = 0; i < adapter->num_tx_queues; i++)
  3242. adapter->tx_ring[i]->reg_idx = i;
  3243. ret = true;
  3244. }
  3245. return ret;
  3246. }
  3247. #ifdef IXGBE_FCOE
  3248. /**
3249. * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
  3250. * @adapter: board private structure to initialize
  3251. *
  3252. * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
  3253. *
  3254. */
  3255. static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
  3256. {
  3257. int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
  3258. bool ret = false;
  3259. struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
  3260. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  3261. #ifdef CONFIG_IXGBE_DCB
  3262. if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
  3263. struct ixgbe_fcoe *fcoe = &adapter->fcoe;
  3264. ixgbe_cache_ring_dcb(adapter);
  3265. /* find out queues in TC for FCoE */
  3266. fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
  3267. fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
  3268. /*
  3269. * In 82599, the number of Tx queues for each traffic
3270. * class in both 8-TC and 4-TC modes is:
  3271. * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
  3272. * 8 TCs: 32 32 16 16 8 8 8 8
  3273. * 4 TCs: 64 64 32 32
3274. * We have max 8 queues for FCoE, where 8 is the
  3275. * FCoE redirection table size. If TC for FCoE is
  3276. * less than or equal to TC3, we have enough queues
  3277. * to add max of 8 queues for FCoE, so we start FCoE
  3278. * tx descriptor from the next one, i.e., reg_idx + 1.
  3279. * If TC for FCoE is above TC3, implying 8 TC mode,
  3280. * and we need 8 for FCoE, we have to take all queues
  3281. * in that traffic class for FCoE.
  3282. */
  3283. if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
  3284. fcoe_tx_i--;
  3285. }
  3286. #endif /* CONFIG_IXGBE_DCB */
  3287. if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
  3288. if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
  3289. (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
  3290. ixgbe_cache_ring_fdir(adapter);
  3291. else
  3292. ixgbe_cache_ring_rss(adapter);
  3293. fcoe_rx_i = f->mask;
  3294. fcoe_tx_i = f->mask;
  3295. }
  3296. for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
  3297. adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
  3298. adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
  3299. }
  3300. ret = true;
  3301. }
  3302. return ret;
  3303. }
  3304. #endif /* IXGBE_FCOE */
  3305. /**
  3306. * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
  3307. * @adapter: board private structure to initialize
  3308. *
  3309. * SR-IOV doesn't use any descriptor rings but changes the default if
  3310. * no other mapping is used.
  3311. *
  3312. */
  3313. static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
  3314. {
  3315. adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
  3316. adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
  3317. if (adapter->num_vfs)
  3318. return true;
  3319. else
  3320. return false;
  3321. }
  3322. /**
  3323. * ixgbe_cache_ring_register - Descriptor ring to register mapping
  3324. * @adapter: board private structure to initialize
  3325. *
  3326. * Once we know the feature-set enabled for the device, we'll cache
  3327. * the register offset the descriptor ring is assigned to.
  3328. *
3329. * Note, the order of the various feature calls is important. It must start with
  3330. * the "most" features enabled at the same time, then trickle down to the
  3331. * least amount of features turned on at once.
  3332. **/
  3333. static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
  3334. {
  3335. /* start with default case */
  3336. adapter->rx_ring[0]->reg_idx = 0;
  3337. adapter->tx_ring[0]->reg_idx = 0;
  3338. if (ixgbe_cache_ring_sriov(adapter))
  3339. return;
  3340. #ifdef IXGBE_FCOE
  3341. if (ixgbe_cache_ring_fcoe(adapter))
  3342. return;
  3343. #endif /* IXGBE_FCOE */
  3344. #ifdef CONFIG_IXGBE_DCB
  3345. if (ixgbe_cache_ring_dcb(adapter))
  3346. return;
  3347. #endif
  3348. if (ixgbe_cache_ring_fdir(adapter))
  3349. return;
  3350. if (ixgbe_cache_ring_rss(adapter))
  3351. return;
  3352. }
  3353. /**
  3354. * ixgbe_alloc_queues - Allocate memory for all rings
  3355. * @adapter: board private structure to initialize
  3356. *
  3357. * We allocate one ring per queue at run-time since we don't know the
  3358. * number of queues at compile-time. The polling_netdev array is
  3359. * intended for Multiqueue, but should work fine with a single queue.
  3360. **/
  3361. static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
  3362. {
  3363. int i;
  3364. int orig_node = adapter->node;
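/*
 * When no NUMA node was assigned (adapter->node == -1), spread the ring
 * structures round-robin across the online nodes; otherwise all rings
 * are allocated on the adapter's home node.
 */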
  3365. for (i = 0; i < adapter->num_tx_queues; i++) {
  3366. struct ixgbe_ring *ring = adapter->tx_ring[i];
  3367. if (orig_node == -1) {
  3368. int cur_node = next_online_node(adapter->node);
  3369. if (cur_node == MAX_NUMNODES)
  3370. cur_node = first_online_node;
  3371. adapter->node = cur_node;
  3372. }
  3373. ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
  3374. adapter->node);
  3375. if (!ring)
  3376. ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
  3377. if (!ring)
  3378. goto err_tx_ring_allocation;
  3379. ring->count = adapter->tx_ring_count;
  3380. ring->queue_index = i;
  3381. ring->numa_node = adapter->node;
  3382. adapter->tx_ring[i] = ring;
  3383. }
  3384. /* Restore the adapter's original node */
  3385. adapter->node = orig_node;
  3386. for (i = 0; i < adapter->num_rx_queues; i++) {
  3387. struct ixgbe_ring *ring = adapter->rx_ring[i];
  3388. if (orig_node == -1) {
  3389. int cur_node = next_online_node(adapter->node);
  3390. if (cur_node == MAX_NUMNODES)
  3391. cur_node = first_online_node;
  3392. adapter->node = cur_node;
  3393. }
  3394. ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
  3395. adapter->node);
  3396. if (!ring)
  3397. ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
  3398. if (!ring)
  3399. goto err_rx_ring_allocation;
  3400. ring->count = adapter->rx_ring_count;
  3401. ring->queue_index = i;
  3402. ring->numa_node = adapter->node;
  3403. adapter->rx_ring[i] = ring;
  3404. }
  3405. /* Restore the adapter's original node */
  3406. adapter->node = orig_node;
  3407. ixgbe_cache_ring_register(adapter);
  3408. return 0;
  3409. err_rx_ring_allocation:
  3410. for (i = 0; i < adapter->num_tx_queues; i++)
  3411. kfree(adapter->tx_ring[i]);
  3412. err_tx_ring_allocation:
  3413. return -ENOMEM;
  3414. }
  3415. /**
  3416. * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
  3417. * @adapter: board private structure to initialize
  3418. *
  3419. * Attempt to configure the interrupts using the best available
  3420. * capabilities of the hardware and the kernel.
  3421. **/
  3422. static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
  3423. {
  3424. struct ixgbe_hw *hw = &adapter->hw;
  3425. int err = 0;
  3426. int vector, v_budget;
  3427. /*
  3428. * It's easy to be greedy for MSI-X vectors, but it really
  3429. * doesn't do us much good if we have a lot more vectors
3430. * than CPUs. So let's be conservative and only ask for
3431. * (roughly) the same number of vectors as there are CPUs.
  3432. */
  3433. v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
  3434. (int)num_online_cpus()) + NON_Q_VECTORS;
  3435. /*
  3436. * At the same time, hardware can only support a maximum of
3437. * hw->mac.max_msix_vectors vectors. With features
  3438. * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
  3439. * descriptor queues supported by our device. Thus, we cap it off in
  3440. * those rare cases where the cpu count also exceeds our vector limit.
  3441. */
  3442. v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
  3443. /* A failure in MSI-X entry allocation isn't fatal, but it does
  3444. * mean we disable MSI-X capabilities of the adapter. */
  3445. adapter->msix_entries = kcalloc(v_budget,
  3446. sizeof(struct msix_entry), GFP_KERNEL);
  3447. if (adapter->msix_entries) {
  3448. for (vector = 0; vector < v_budget; vector++)
  3449. adapter->msix_entries[vector].entry = vector;
  3450. ixgbe_acquire_msix_vectors(adapter, v_budget);
  3451. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
  3452. goto out;
  3453. }
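/*
 * Reaching this point means MSI-X could not be enabled.  The
 * multi-queue features below all depend on having one vector per
 * queue, so they are turned off and the queue count is recomputed
 * before falling back to MSI (and, failing that, legacy interrupts).
 */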
  3454. adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
  3455. adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
  3456. adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  3457. adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
  3458. adapter->atr_sample_rate = 0;
  3459. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  3460. ixgbe_disable_sriov(adapter);
  3461. ixgbe_set_num_queues(adapter);
  3462. err = pci_enable_msi(adapter->pdev);
  3463. if (!err) {
  3464. adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
  3465. } else {
  3466. DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
  3467. "falling back to legacy. Error: %d\n", err);
  3468. /* reset err */
  3469. err = 0;
  3470. }
  3471. out:
  3472. return err;
  3473. }
  3474. /**
  3475. * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
  3476. * @adapter: board private structure to initialize
  3477. *
  3478. * We allocate one q_vector per queue interrupt. If allocation fails we
  3479. * return -ENOMEM.
  3480. **/
  3481. static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
  3482. {
  3483. int q_idx, num_q_vectors;
  3484. struct ixgbe_q_vector *q_vector;
  3485. int napi_vectors;
  3486. int (*poll)(struct napi_struct *, int);
  3487. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  3488. num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  3489. napi_vectors = adapter->num_rx_queues;
  3490. poll = &ixgbe_clean_rxtx_many;
  3491. } else {
  3492. num_q_vectors = 1;
  3493. napi_vectors = 1;
  3494. poll = &ixgbe_poll;
  3495. }
  3496. for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
  3497. q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
  3498. GFP_KERNEL, adapter->node);
  3499. if (!q_vector)
  3500. q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
  3501. GFP_KERNEL);
  3502. if (!q_vector)
  3503. goto err_out;
  3504. q_vector->adapter = adapter;
  3505. if (q_vector->txr_count && !q_vector->rxr_count)
  3506. q_vector->eitr = adapter->tx_eitr_param;
  3507. else
  3508. q_vector->eitr = adapter->rx_eitr_param;
  3509. q_vector->v_idx = q_idx;
  3510. netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
  3511. adapter->q_vector[q_idx] = q_vector;
  3512. }
  3513. return 0;
  3514. err_out:
  3515. while (q_idx) {
  3516. q_idx--;
  3517. q_vector = adapter->q_vector[q_idx];
  3518. netif_napi_del(&q_vector->napi);
  3519. kfree(q_vector);
  3520. adapter->q_vector[q_idx] = NULL;
  3521. }
  3522. return -ENOMEM;
  3523. }
  3524. /**
  3525. * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
  3526. * @adapter: board private structure to initialize
  3527. *
  3528. * This function frees the memory allocated to the q_vectors. In addition if
  3529. * NAPI is enabled it will delete any references to the NAPI struct prior
  3530. * to freeing the q_vector.
  3531. **/
  3532. static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
  3533. {
  3534. int q_idx, num_q_vectors;
  3535. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
  3536. num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  3537. else
  3538. num_q_vectors = 1;
  3539. for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
  3540. struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
  3541. adapter->q_vector[q_idx] = NULL;
  3542. netif_napi_del(&q_vector->napi);
  3543. kfree(q_vector);
  3544. }
  3545. }
  3546. static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
  3547. {
  3548. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  3549. adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
  3550. pci_disable_msix(adapter->pdev);
  3551. kfree(adapter->msix_entries);
  3552. adapter->msix_entries = NULL;
  3553. } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
  3554. adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
  3555. pci_disable_msi(adapter->pdev);
  3556. }
  3557. return;
  3558. }
  3559. /**
  3560. * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
  3561. * @adapter: board private structure to initialize
  3562. *
  3563. * We determine which interrupt scheme to use based on...
  3564. * - Kernel support (MSI, MSI-X)
  3565. * - which can be user-defined (via MODULE_PARAM)
  3566. * - Hardware queue count (num_*_queues)
  3567. * - defined by miscellaneous hardware support/features (RSS, etc.)
  3568. **/
  3569. int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
  3570. {
  3571. int err;
  3572. /* Number of supported queues */
  3573. ixgbe_set_num_queues(adapter);
  3574. err = ixgbe_set_interrupt_capability(adapter);
  3575. if (err) {
  3576. DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
  3577. goto err_set_interrupt;
  3578. }
  3579. err = ixgbe_alloc_q_vectors(adapter);
  3580. if (err) {
  3581. DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
  3582. "vectors\n");
  3583. goto err_alloc_q_vectors;
  3584. }
  3585. err = ixgbe_alloc_queues(adapter);
  3586. if (err) {
  3587. DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
  3588. goto err_alloc_queues;
  3589. }
  3590. DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
  3591. "Tx Queue count = %u\n",
  3592. (adapter->num_rx_queues > 1) ? "Enabled" :
  3593. "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
  3594. set_bit(__IXGBE_DOWN, &adapter->state);
  3595. return 0;
  3596. err_alloc_queues:
  3597. ixgbe_free_q_vectors(adapter);
  3598. err_alloc_q_vectors:
  3599. ixgbe_reset_interrupt_capability(adapter);
  3600. err_set_interrupt:
  3601. return err;
  3602. }
  3603. /**
  3604. * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
  3605. * @adapter: board private structure to clear interrupt scheme on
  3606. *
  3607. * We go through and clear interrupt specific resources and reset the structure
  3608. * to pre-load conditions
  3609. **/
  3610. void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
  3611. {
  3612. int i;
  3613. for (i = 0; i < adapter->num_tx_queues; i++) {
  3614. kfree(adapter->tx_ring[i]);
  3615. adapter->tx_ring[i] = NULL;
  3616. }
  3617. for (i = 0; i < adapter->num_rx_queues; i++) {
  3618. kfree(adapter->rx_ring[i]);
  3619. adapter->rx_ring[i] = NULL;
  3620. }
  3621. ixgbe_free_q_vectors(adapter);
  3622. ixgbe_reset_interrupt_capability(adapter);
  3623. }
  3624. /**
3625. * ixgbe_sfp_timer - timer that schedules the worker which finds a missing module
  3626. * @data: pointer to our adapter struct
  3627. **/
  3628. static void ixgbe_sfp_timer(unsigned long data)
  3629. {
  3630. struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
  3631. /*
  3632. * Do the sfp_timer outside of interrupt context due to the
  3633. * delays that sfp+ detection requires
  3634. */
  3635. schedule_work(&adapter->sfp_task);
  3636. }
  3637. /**
  3638. * ixgbe_sfp_task - worker thread to find a missing module
  3639. * @work: pointer to work_struct containing our data
  3640. **/
  3641. static void ixgbe_sfp_task(struct work_struct *work)
  3642. {
  3643. struct ixgbe_adapter *adapter = container_of(work,
  3644. struct ixgbe_adapter,
  3645. sfp_task);
  3646. struct ixgbe_hw *hw = &adapter->hw;
  3647. if ((hw->phy.type == ixgbe_phy_nl) &&
  3648. (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
  3649. s32 ret = hw->phy.ops.identify_sfp(hw);
  3650. if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
  3651. goto reschedule;
  3652. ret = hw->phy.ops.reset(hw);
  3653. if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
  3654. dev_err(&adapter->pdev->dev, "failed to initialize "
  3655. "because an unsupported SFP+ module type "
  3656. "was detected.\n"
  3657. "Reload the driver after installing a "
  3658. "supported module.\n");
  3659. unregister_netdev(adapter->netdev);
  3660. } else {
  3661. DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
  3662. hw->phy.sfp_type);
  3663. }
  3664. /* don't need this routine any more */
  3665. clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
  3666. }
  3667. return;
  3668. reschedule:
  3669. if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
  3670. mod_timer(&adapter->sfp_timer,
  3671. round_jiffies(jiffies + (2 * HZ)));
  3672. }
  3673. /**
  3674. * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
  3675. * @adapter: board private structure to initialize
  3676. *
  3677. * ixgbe_sw_init initializes the Adapter private data structure.
  3678. * Fields are initialized based on PCI device information and
  3679. * OS network device settings (MTU size).
  3680. **/
  3681. static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
  3682. {
  3683. struct ixgbe_hw *hw = &adapter->hw;
  3684. struct pci_dev *pdev = adapter->pdev;
  3685. struct net_device *dev = adapter->netdev;
  3686. unsigned int rss;
  3687. #ifdef CONFIG_IXGBE_DCB
  3688. int j;
  3689. struct tc_configuration *tc;
  3690. #endif
  3691. /* PCI config space info */
  3692. hw->vendor_id = pdev->vendor;
  3693. hw->device_id = pdev->device;
  3694. hw->revision_id = pdev->revision;
  3695. hw->subsystem_vendor_id = pdev->subsystem_vendor;
  3696. hw->subsystem_device_id = pdev->subsystem_device;
  3697. /* Set capability flags */
  3698. rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
  3699. adapter->ring_feature[RING_F_RSS].indices = rss;
  3700. adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
  3701. adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
  3702. if (hw->mac.type == ixgbe_mac_82598EB) {
  3703. if (hw->device_id == IXGBE_DEV_ID_82598AT)
  3704. adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
  3705. adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
  3706. } else if (hw->mac.type == ixgbe_mac_82599EB) {
  3707. adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
  3708. adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
  3709. adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
  3710. if (dev->features & NETIF_F_NTUPLE) {
  3711. /* Flow Director perfect filter enabled */
  3712. adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
  3713. adapter->atr_sample_rate = 0;
  3714. spin_lock_init(&adapter->fdir_perfect_lock);
  3715. } else {
  3716. /* Flow Director hash filters enabled */
  3717. adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
  3718. adapter->atr_sample_rate = 20;
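/*
 * A sample rate of 20 means ATR inspects roughly one of every 20
 * transmitted packets when building hash-based Flow Director filters.
 */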
  3719. }
  3720. adapter->ring_feature[RING_F_FDIR].indices =
  3721. IXGBE_MAX_FDIR_INDICES;
  3722. adapter->fdir_pballoc = 0;
  3723. #ifdef IXGBE_FCOE
  3724. adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
  3725. adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
  3726. adapter->ring_feature[RING_F_FCOE].indices = 0;
  3727. #ifdef CONFIG_IXGBE_DCB
  3728. /* Default traffic class to use for FCoE */
  3729. adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
  3730. #endif
  3731. #endif /* IXGBE_FCOE */
  3732. }
  3733. #ifdef CONFIG_IXGBE_DCB
  3734. /* Configure DCB traffic classes */
  3735. for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
  3736. tc = &adapter->dcb_cfg.tc_config[j];
  3737. tc->path[DCB_TX_CONFIG].bwg_id = 0;
  3738. tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
  3739. tc->path[DCB_RX_CONFIG].bwg_id = 0;
  3740. tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
  3741. tc->dcb_pfc = pfc_disabled;
  3742. }
  3743. adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
  3744. adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
  3745. adapter->dcb_cfg.rx_pba_cfg = pba_equal;
  3746. adapter->dcb_cfg.pfc_mode_enable = false;
  3747. adapter->dcb_cfg.round_robin_enable = false;
  3748. adapter->dcb_set_bitmap = 0x00;
  3749. ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
  3750. adapter->ring_feature[RING_F_DCB].indices);
  3751. #endif
  3752. /* default flow control settings */
  3753. hw->fc.requested_mode = ixgbe_fc_full;
  3754. hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
  3755. #ifdef CONFIG_DCB
  3756. adapter->last_lfc_mode = hw->fc.current_mode;
  3757. #endif
  3758. hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
  3759. hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
  3760. hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
  3761. hw->fc.send_xon = true;
  3762. hw->fc.disable_fc_autoneg = false;
  3763. /* enable itr by default in dynamic mode */
  3764. adapter->rx_itr_setting = 1;
  3765. adapter->rx_eitr_param = 20000;
  3766. adapter->tx_itr_setting = 1;
  3767. adapter->tx_eitr_param = 10000;
3768. /* set default low/high throughput thresholds (roughly MB/s) used by dynamic eitr */
  3769. adapter->eitr_low = 10;
  3770. adapter->eitr_high = 20;
  3771. /* set default ring sizes */
  3772. adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
  3773. adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
  3774. /* initialize eeprom parameters */
  3775. if (ixgbe_init_eeprom_params_generic(hw)) {
  3776. dev_err(&pdev->dev, "EEPROM initialization failed\n");
  3777. return -EIO;
  3778. }
  3779. /* enable rx csum by default */
  3780. adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
  3781. /* get assigned NUMA node */
  3782. adapter->node = dev_to_node(&pdev->dev);
  3783. set_bit(__IXGBE_DOWN, &adapter->state);
  3784. return 0;
  3785. }
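/*
 * Illustrative sketch (not part of the upstream driver): the per-TC defaults
 * set above use bwg_percent = 12 + (j & 1), i.e. even classes get 12% and
 * odd classes 13%, so the eight traffic classes together claim exactly 100%
 * of the single bandwidth group.  The helper name below is hypothetical.
 */
static inline int ixgbe_sketch_default_bwg_total(void)
{
        int j, total = 0;

        /* eight traffic classes (MAX_TRAFFIC_CLASS), mirroring ixgbe_sw_init() */
        for (j = 0; j < 8; j++)
                total += 12 + (j & 1);

        return total;   /* expected to be 100 */
}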
  3786. /**
  3787. * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
  3788. * @adapter: board private structure
  3789. * @tx_ring: tx descriptor ring (for a specific queue) to setup
  3790. *
  3791. * Return 0 on success, negative on failure
  3792. **/
  3793. int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
  3794. struct ixgbe_ring *tx_ring)
  3795. {
  3796. struct pci_dev *pdev = adapter->pdev;
  3797. int size;
  3798. size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
  3799. tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
  3800. if (!tx_ring->tx_buffer_info)
  3801. tx_ring->tx_buffer_info = vmalloc(size);
  3802. if (!tx_ring->tx_buffer_info)
  3803. goto err;
  3804. memset(tx_ring->tx_buffer_info, 0, size);
  3805. /* round up to nearest 4K */
  3806. tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
  3807. tx_ring->size = ALIGN(tx_ring->size, 4096);
  3808. tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
  3809. &tx_ring->dma);
  3810. if (!tx_ring->desc)
  3811. goto err;
  3812. tx_ring->next_to_use = 0;
  3813. tx_ring->next_to_clean = 0;
  3814. tx_ring->work_limit = tx_ring->count;
  3815. return 0;
  3816. err:
  3817. vfree(tx_ring->tx_buffer_info);
  3818. tx_ring->tx_buffer_info = NULL;
  3819. DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
  3820. "descriptor ring\n");
  3821. return -ENOMEM;
  3822. }
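/*
 * Illustrative sketch (not part of the upstream driver): how the descriptor
 * ring byte size used above is derived.  Each entry is a fixed-size hardware
 * descriptor and the total is padded to a 4 KB boundary before the DMA
 * allocation; ALIGN() comes from <linux/kernel.h>.  The helper name is
 * hypothetical.
 */
static inline unsigned int ixgbe_sketch_tx_ring_bytes(unsigned int count)
{
        unsigned int bytes = count * sizeof(union ixgbe_adv_tx_desc);

        return ALIGN(bytes, 4096);      /* round up to the nearest 4 KB */
}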
  3823. /**
  3824. * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
  3825. * @adapter: board private structure
  3826. *
  3827. * If this function returns with an error, then it's possible one or
3828. more of the rings are populated (while the rest are not). It is the
3829. caller's duty to clean those orphaned rings.
  3830. *
  3831. * Return 0 on success, negative on failure
  3832. **/
  3833. static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
  3834. {
  3835. int i, err = 0;
  3836. for (i = 0; i < adapter->num_tx_queues; i++) {
  3837. err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
  3838. if (!err)
  3839. continue;
  3840. DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
  3841. break;
  3842. }
  3843. return err;
  3844. }
  3845. /**
  3846. * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  3847. * @adapter: board private structure
  3848. * @rx_ring: rx descriptor ring (for a specific queue) to setup
  3849. *
  3850. * Returns 0 on success, negative on failure
  3851. **/
  3852. int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
  3853. struct ixgbe_ring *rx_ring)
  3854. {
  3855. struct pci_dev *pdev = adapter->pdev;
  3856. int size;
  3857. size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
  3858. rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
  3859. if (!rx_ring->rx_buffer_info)
  3860. rx_ring->rx_buffer_info = vmalloc(size);
  3861. if (!rx_ring->rx_buffer_info) {
  3862. DPRINTK(PROBE, ERR,
  3863. "vmalloc allocation failed for the rx desc ring\n");
  3864. goto alloc_failed;
  3865. }
  3866. memset(rx_ring->rx_buffer_info, 0, size);
  3867. /* Round up to nearest 4K */
  3868. rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
  3869. rx_ring->size = ALIGN(rx_ring->size, 4096);
  3870. rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
  3871. if (!rx_ring->desc) {
  3872. DPRINTK(PROBE, ERR,
  3873. "Memory allocation failed for the rx desc ring\n");
  3874. vfree(rx_ring->rx_buffer_info);
  3875. goto alloc_failed;
  3876. }
  3877. rx_ring->next_to_clean = 0;
  3878. rx_ring->next_to_use = 0;
  3879. return 0;
  3880. alloc_failed:
  3881. return -ENOMEM;
  3882. }
  3883. /**
  3884. * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
  3885. * @adapter: board private structure
  3886. *
  3887. * If this function returns with an error, then it's possible one or
3888. more of the rings are populated (while the rest are not). It is the
3889. caller's duty to clean those orphaned rings.
  3890. *
  3891. * Return 0 on success, negative on failure
  3892. **/
  3893. static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
  3894. {
  3895. int i, err = 0;
  3896. for (i = 0; i < adapter->num_rx_queues; i++) {
  3897. err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
  3898. if (!err)
  3899. continue;
  3900. DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
  3901. break;
  3902. }
  3903. return err;
  3904. }
  3905. /**
  3906. * ixgbe_free_tx_resources - Free Tx Resources per Queue
  3907. * @adapter: board private structure
  3908. * @tx_ring: Tx descriptor ring for a specific queue
  3909. *
  3910. * Free all transmit software resources
  3911. **/
  3912. void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
  3913. struct ixgbe_ring *tx_ring)
  3914. {
  3915. struct pci_dev *pdev = adapter->pdev;
  3916. ixgbe_clean_tx_ring(adapter, tx_ring);
  3917. vfree(tx_ring->tx_buffer_info);
  3918. tx_ring->tx_buffer_info = NULL;
  3919. pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
  3920. tx_ring->desc = NULL;
  3921. }
  3922. /**
  3923. * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
  3924. * @adapter: board private structure
  3925. *
  3926. * Free all transmit software resources
  3927. **/
  3928. static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
  3929. {
  3930. int i;
  3931. for (i = 0; i < adapter->num_tx_queues; i++)
  3932. if (adapter->tx_ring[i]->desc)
  3933. ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
  3934. }
  3935. /**
  3936. * ixgbe_free_rx_resources - Free Rx Resources
  3937. * @adapter: board private structure
  3938. * @rx_ring: ring to clean the resources from
  3939. *
  3940. * Free all receive software resources
  3941. **/
  3942. void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
  3943. struct ixgbe_ring *rx_ring)
  3944. {
  3945. struct pci_dev *pdev = adapter->pdev;
  3946. ixgbe_clean_rx_ring(adapter, rx_ring);
  3947. vfree(rx_ring->rx_buffer_info);
  3948. rx_ring->rx_buffer_info = NULL;
  3949. pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
  3950. rx_ring->desc = NULL;
  3951. }
  3952. /**
  3953. * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
  3954. * @adapter: board private structure
  3955. *
  3956. * Free all receive software resources
  3957. **/
  3958. static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
  3959. {
  3960. int i;
  3961. for (i = 0; i < adapter->num_rx_queues; i++)
  3962. if (adapter->rx_ring[i]->desc)
  3963. ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
  3964. }
  3965. /**
  3966. * ixgbe_change_mtu - Change the Maximum Transfer Unit
  3967. * @netdev: network interface device structure
  3968. * @new_mtu: new value for maximum frame size
  3969. *
  3970. * Returns 0 on success, negative on failure
  3971. **/
  3972. static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
  3973. {
  3974. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3975. int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
  3976. /* MTU < 68 is an error and causes problems on some kernels */
  3977. if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
  3978. return -EINVAL;
  3979. DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
  3980. netdev->mtu, new_mtu);
  3981. /* must set new MTU before calling down or up */
  3982. netdev->mtu = new_mtu;
  3983. if (netif_running(netdev))
  3984. ixgbe_reinit_locked(adapter);
  3985. return 0;
  3986. }
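/*
 * Illustrative sketch (not part of the upstream driver): the bounds checked
 * by ixgbe_change_mtu() above.  The on-wire frame is the MTU plus the
 * Ethernet header and FCS, and must not exceed the adapter's jumbo frame
 * limit.  The helper name is hypothetical.
 */
static inline bool ixgbe_sketch_mtu_valid(int new_mtu)
{
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

        return (new_mtu >= 68) && (max_frame <= IXGBE_MAX_JUMBO_FRAME_SIZE);
}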
  3987. /**
  3988. * ixgbe_open - Called when a network interface is made active
  3989. * @netdev: network interface device structure
  3990. *
  3991. * Returns 0 on success, negative value on failure
  3992. *
  3993. * The open entry point is called when a network interface is made
  3994. * active by the system (IFF_UP). At this point all resources needed
  3995. * for transmit and receive operations are allocated, the interrupt
  3996. * handler is registered with the OS, the watchdog timer is started,
  3997. * and the stack is notified that the interface is ready.
  3998. **/
  3999. static int ixgbe_open(struct net_device *netdev)
  4000. {
  4001. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4002. int err;
  4003. /* disallow open during test */
  4004. if (test_bit(__IXGBE_TESTING, &adapter->state))
  4005. return -EBUSY;
  4006. netif_carrier_off(netdev);
  4007. /* allocate transmit descriptors */
  4008. err = ixgbe_setup_all_tx_resources(adapter);
  4009. if (err)
  4010. goto err_setup_tx;
  4011. /* allocate receive descriptors */
  4012. err = ixgbe_setup_all_rx_resources(adapter);
  4013. if (err)
  4014. goto err_setup_rx;
  4015. ixgbe_configure(adapter);
  4016. err = ixgbe_request_irq(adapter);
  4017. if (err)
  4018. goto err_req_irq;
  4019. err = ixgbe_up_complete(adapter);
  4020. if (err)
  4021. goto err_up;
  4022. netif_tx_start_all_queues(netdev);
  4023. return 0;
  4024. err_up:
  4025. ixgbe_release_hw_control(adapter);
  4026. ixgbe_free_irq(adapter);
  4027. err_req_irq:
  4028. err_setup_rx:
  4029. ixgbe_free_all_rx_resources(adapter);
  4030. err_setup_tx:
  4031. ixgbe_free_all_tx_resources(adapter);
  4032. ixgbe_reset(adapter);
  4033. return err;
  4034. }
  4035. /**
  4036. * ixgbe_close - Disables a network interface
  4037. * @netdev: network interface device structure
  4038. *
  4039. * Returns 0, this is not allowed to fail
  4040. *
  4041. * The close entry point is called when an interface is de-activated
  4042. * by the OS. The hardware is still under the drivers control, but
  4043. * needs to be disabled. A global MAC reset is issued to stop the
  4044. * hardware, and all transmit and receive resources are freed.
  4045. **/
  4046. static int ixgbe_close(struct net_device *netdev)
  4047. {
  4048. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4049. ixgbe_down(adapter);
  4050. ixgbe_free_irq(adapter);
  4051. ixgbe_free_all_tx_resources(adapter);
  4052. ixgbe_free_all_rx_resources(adapter);
  4053. ixgbe_release_hw_control(adapter);
  4054. return 0;
  4055. }
  4056. #ifdef CONFIG_PM
  4057. static int ixgbe_resume(struct pci_dev *pdev)
  4058. {
  4059. struct net_device *netdev = pci_get_drvdata(pdev);
  4060. struct ixgbe_adapter *adapter = netdev_priv(netdev);
4061. int err;
  4062. pci_set_power_state(pdev, PCI_D0);
  4063. pci_restore_state(pdev);
  4064. /*
  4065. * pci_restore_state clears dev->state_saved so call
  4066. * pci_save_state to restore it.
  4067. */
  4068. pci_save_state(pdev);
  4069. err = pci_enable_device_mem(pdev);
  4070. if (err) {
  4071. printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
  4072. "suspend\n");
  4073. return err;
  4074. }
  4075. pci_set_master(pdev);
  4076. pci_wake_from_d3(pdev, false);
  4077. err = ixgbe_init_interrupt_scheme(adapter);
  4078. if (err) {
  4079. printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
  4080. "device\n");
  4081. return err;
  4082. }
  4083. ixgbe_reset(adapter);
  4084. IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
  4085. if (netif_running(netdev)) {
  4086. err = ixgbe_open(adapter->netdev);
  4087. if (err)
  4088. return err;
  4089. }
  4090. netif_device_attach(netdev);
  4091. return 0;
  4092. }
  4093. #endif /* CONFIG_PM */
  4094. static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
  4095. {
  4096. struct net_device *netdev = pci_get_drvdata(pdev);
  4097. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4098. struct ixgbe_hw *hw = &adapter->hw;
  4099. u32 ctrl, fctrl;
  4100. u32 wufc = adapter->wol;
  4101. #ifdef CONFIG_PM
  4102. int retval = 0;
  4103. #endif
  4104. netif_device_detach(netdev);
  4105. if (netif_running(netdev)) {
  4106. ixgbe_down(adapter);
  4107. ixgbe_free_irq(adapter);
  4108. ixgbe_free_all_tx_resources(adapter);
  4109. ixgbe_free_all_rx_resources(adapter);
  4110. }
  4111. ixgbe_clear_interrupt_scheme(adapter);
  4112. #ifdef CONFIG_PM
  4113. retval = pci_save_state(pdev);
  4114. if (retval)
  4115. return retval;
  4116. #endif
  4117. if (wufc) {
  4118. ixgbe_set_rx_mode(netdev);
  4119. /* turn on all-multi mode if wake on multicast is enabled */
  4120. if (wufc & IXGBE_WUFC_MC) {
  4121. fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  4122. fctrl |= IXGBE_FCTRL_MPE;
  4123. IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
  4124. }
  4125. ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
  4126. ctrl |= IXGBE_CTRL_GIO_DIS;
  4127. IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
  4128. IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
  4129. } else {
  4130. IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
  4131. IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
  4132. }
  4133. if (wufc && hw->mac.type == ixgbe_mac_82599EB)
  4134. pci_wake_from_d3(pdev, true);
  4135. else
  4136. pci_wake_from_d3(pdev, false);
  4137. *enable_wake = !!wufc;
  4138. ixgbe_release_hw_control(adapter);
  4139. pci_disable_device(pdev);
  4140. return 0;
  4141. }
  4142. #ifdef CONFIG_PM
  4143. static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
  4144. {
  4145. int retval;
  4146. bool wake;
  4147. retval = __ixgbe_shutdown(pdev, &wake);
  4148. if (retval)
  4149. return retval;
  4150. if (wake) {
  4151. pci_prepare_to_sleep(pdev);
  4152. } else {
  4153. pci_wake_from_d3(pdev, false);
  4154. pci_set_power_state(pdev, PCI_D3hot);
  4155. }
  4156. return 0;
  4157. }
  4158. #endif /* CONFIG_PM */
  4159. static void ixgbe_shutdown(struct pci_dev *pdev)
  4160. {
  4161. bool wake;
  4162. __ixgbe_shutdown(pdev, &wake);
  4163. if (system_state == SYSTEM_POWER_OFF) {
  4164. pci_wake_from_d3(pdev, wake);
  4165. pci_set_power_state(pdev, PCI_D3hot);
  4166. }
  4167. }
  4168. /**
  4169. * ixgbe_update_stats - Update the board statistics counters.
  4170. * @adapter: board private structure
  4171. **/
  4172. void ixgbe_update_stats(struct ixgbe_adapter *adapter)
  4173. {
  4174. struct net_device *netdev = adapter->netdev;
  4175. struct ixgbe_hw *hw = &adapter->hw;
  4176. u64 total_mpc = 0;
  4177. u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
  4178. u64 non_eop_descs = 0, restart_queue = 0;
  4179. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
  4180. u64 rsc_count = 0;
  4181. u64 rsc_flush = 0;
  4182. for (i = 0; i < 16; i++)
  4183. adapter->hw_rx_no_dma_resources +=
  4184. IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
  4185. for (i = 0; i < adapter->num_rx_queues; i++) {
  4186. rsc_count += adapter->rx_ring[i]->rsc_count;
  4187. rsc_flush += adapter->rx_ring[i]->rsc_flush;
  4188. }
  4189. adapter->rsc_total_count = rsc_count;
  4190. adapter->rsc_total_flush = rsc_flush;
  4191. }
4192. /* gather per-queue stats into the adapter struct */
  4193. for (i = 0; i < adapter->num_tx_queues; i++)
  4194. restart_queue += adapter->tx_ring[i]->restart_queue;
  4195. adapter->restart_queue = restart_queue;
  4196. for (i = 0; i < adapter->num_rx_queues; i++)
  4197. non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
  4198. adapter->non_eop_descs = non_eop_descs;
  4199. adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
  4200. for (i = 0; i < 8; i++) {
  4201. /* for packet buffers not used, the register should read 0 */
  4202. mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
  4203. missed_rx += mpc;
  4204. adapter->stats.mpc[i] += mpc;
  4205. total_mpc += adapter->stats.mpc[i];
  4206. if (hw->mac.type == ixgbe_mac_82598EB)
  4207. adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
  4208. adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
  4209. adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
  4210. adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
  4211. adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
  4212. if (hw->mac.type == ixgbe_mac_82599EB) {
  4213. adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
  4214. IXGBE_PXONRXCNT(i));
  4215. adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
  4216. IXGBE_PXOFFRXCNT(i));
  4217. adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
  4218. } else {
  4219. adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
  4220. IXGBE_PXONRXC(i));
  4221. adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
  4222. IXGBE_PXOFFRXC(i));
  4223. }
  4224. adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
  4225. IXGBE_PXONTXC(i));
  4226. adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
  4227. IXGBE_PXOFFTXC(i));
  4228. }
  4229. adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
  4230. /* work around hardware counting issue */
  4231. adapter->stats.gprc -= missed_rx;
  4232. /* 82598 hardware only has a 32 bit counter in the high register */
  4233. if (hw->mac.type == ixgbe_mac_82599EB) {
  4234. u64 tmp;
  4235. adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
  4236. tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
  4237. adapter->stats.gorc += (tmp << 32);
  4238. adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
  4239. tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
  4240. adapter->stats.gotc += (tmp << 32);
  4241. adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
  4242. IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
  4243. adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
  4244. adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
  4245. adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
  4246. adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
  4247. #ifdef IXGBE_FCOE
  4248. adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
  4249. adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
  4250. adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
  4251. adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
  4252. adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
  4253. adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
  4254. #endif /* IXGBE_FCOE */
  4255. } else {
  4256. adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
  4257. adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
  4258. adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
  4259. adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
  4260. adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
  4261. }
  4262. bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
  4263. adapter->stats.bprc += bprc;
  4264. adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
  4265. if (hw->mac.type == ixgbe_mac_82598EB)
  4266. adapter->stats.mprc -= bprc;
  4267. adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
  4268. adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
  4269. adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
  4270. adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
  4271. adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
  4272. adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
  4273. adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
  4274. adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
  4275. lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
  4276. adapter->stats.lxontxc += lxon;
  4277. lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
  4278. adapter->stats.lxofftxc += lxoff;
  4279. adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
  4280. adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
  4281. adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
  4282. /*
  4283. * 82598 errata - tx of flow control packets is included in tx counters
  4284. */
  4285. xon_off_tot = lxon + lxoff;
  4286. adapter->stats.gptc -= xon_off_tot;
  4287. adapter->stats.mptc -= xon_off_tot;
  4288. adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
  4290. adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
  4291. adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
  4292. adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
  4293. adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
  4294. adapter->stats.ptc64 -= xon_off_tot;
  4295. adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
  4296. adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
  4297. adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
  4298. adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
  4299. adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
  4300. adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
  4301. /* Fill out the OS statistics structure */
  4302. netdev->stats.multicast = adapter->stats.mprc;
  4303. /* Rx Errors */
  4304. netdev->stats.rx_errors = adapter->stats.crcerrs +
  4305. adapter->stats.rlec;
  4306. netdev->stats.rx_dropped = 0;
  4307. netdev->stats.rx_length_errors = adapter->stats.rlec;
  4308. netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
  4309. netdev->stats.rx_missed_errors = total_mpc;
  4310. }
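/*
 * Illustrative sketch (not part of the upstream driver): how the 36-bit
 * good-octets counters are reassembled above on 82599.  The low register
 * holds 32 bits and only the bottom four bits of the high register are
 * significant, so the running total grows by GORCL | (GORCH[3:0] << 32).
 */
static inline u64 ixgbe_sketch_36bit_counter(u32 lo, u32 hi)
{
        return (u64)lo | ((u64)(hi & 0xF) << 32);
}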
  4311. /**
  4312. * ixgbe_watchdog - Timer Call-back
  4313. * @data: pointer to adapter cast into an unsigned long
  4314. **/
  4315. static void ixgbe_watchdog(unsigned long data)
  4316. {
  4317. struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
  4318. struct ixgbe_hw *hw = &adapter->hw;
  4319. u64 eics = 0;
  4320. int i;
  4321. /*
  4322. * Do the watchdog outside of interrupt context due to the lovely
  4323. * delays that some of the newer hardware requires
  4324. */
  4325. if (test_bit(__IXGBE_DOWN, &adapter->state))
  4326. goto watchdog_short_circuit;
  4327. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
  4328. /*
  4329. * for legacy and MSI interrupts don't set any bits
  4330. * that are enabled for EIAM, because this operation
  4331. * would set *both* EIMS and EICS for any bit in EIAM
  4332. */
  4333. IXGBE_WRITE_REG(hw, IXGBE_EICS,
  4334. (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
  4335. goto watchdog_reschedule;
  4336. }
  4337. /* get one bit for every active tx/rx interrupt vector */
  4338. for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
  4339. struct ixgbe_q_vector *qv = adapter->q_vector[i];
  4340. if (qv->rxr_count || qv->txr_count)
  4341. eics |= ((u64)1 << i);
  4342. }
  4343. /* Cause software interrupt to ensure rx rings are cleaned */
  4344. ixgbe_irq_rearm_queues(adapter, eics);
  4345. watchdog_reschedule:
  4346. /* Reset the timer */
  4347. mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
  4348. watchdog_short_circuit:
  4349. schedule_work(&adapter->watchdog_task);
  4350. }
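/*
 * Illustrative sketch (not part of the upstream driver): how the watchdog
 * above builds the EICS mask when MSI-X is enabled - one bit per q_vector
 * that owns at least one Tx or Rx ring, so the software interrupt is raised
 * only for vectors that actually have work to clean.
 */
static inline u64 ixgbe_sketch_watchdog_eics(struct ixgbe_adapter *adapter)
{
        u64 eics = 0;
        int i;

        for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                struct ixgbe_q_vector *qv = adapter->q_vector[i];

                if (qv->rxr_count || qv->txr_count)
                        eics |= ((u64)1 << i);
        }
        return eics;
}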
  4351. /**
  4352. * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
  4353. * @work: pointer to work_struct containing our data
  4354. **/
  4355. static void ixgbe_multispeed_fiber_task(struct work_struct *work)
  4356. {
  4357. struct ixgbe_adapter *adapter = container_of(work,
  4358. struct ixgbe_adapter,
  4359. multispeed_fiber_task);
  4360. struct ixgbe_hw *hw = &adapter->hw;
  4361. u32 autoneg;
  4362. bool negotiation;
  4363. adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
  4364. autoneg = hw->phy.autoneg_advertised;
  4365. if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
  4366. hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
  4367. if (hw->mac.ops.setup_link)
  4368. hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
  4369. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  4370. adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
  4371. }
  4372. /**
  4373. * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
  4374. * @work: pointer to work_struct containing our data
  4375. **/
  4376. static void ixgbe_sfp_config_module_task(struct work_struct *work)
  4377. {
  4378. struct ixgbe_adapter *adapter = container_of(work,
  4379. struct ixgbe_adapter,
  4380. sfp_config_module_task);
  4381. struct ixgbe_hw *hw = &adapter->hw;
4382. s32 err;
  4383. adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
  4384. /* Time for electrical oscillations to settle down */
  4385. msleep(100);
  4386. err = hw->phy.ops.identify_sfp(hw);
  4387. if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
  4388. dev_err(&adapter->pdev->dev, "failed to initialize because "
  4389. "an unsupported SFP+ module type was detected.\n"
  4390. "Reload the driver after installing a supported "
  4391. "module.\n");
  4392. unregister_netdev(adapter->netdev);
  4393. return;
  4394. }
  4395. hw->mac.ops.setup_sfp(hw);
  4396. if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
  4397. /* This will also work for DA Twinax connections */
  4398. schedule_work(&adapter->multispeed_fiber_task);
  4399. adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
  4400. }
  4401. /**
  4402. * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
  4403. * @work: pointer to work_struct containing our data
  4404. **/
  4405. static void ixgbe_fdir_reinit_task(struct work_struct *work)
  4406. {
  4407. struct ixgbe_adapter *adapter = container_of(work,
  4408. struct ixgbe_adapter,
  4409. fdir_reinit_task);
  4410. struct ixgbe_hw *hw = &adapter->hw;
  4411. int i;
  4412. if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
  4413. for (i = 0; i < adapter->num_tx_queues; i++)
  4414. set_bit(__IXGBE_FDIR_INIT_DONE,
  4415. &(adapter->tx_ring[i]->reinit_state));
  4416. } else {
  4417. DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
  4418. "ignored adding FDIR ATR filters \n");
  4419. }
  4420. /* Done FDIR Re-initialization, enable transmits */
  4421. netif_tx_start_all_queues(adapter->netdev);
  4422. }
  4423. static DEFINE_MUTEX(ixgbe_watchdog_lock);
  4424. /**
  4425. * ixgbe_watchdog_task - worker thread to bring link up
  4426. * @work: pointer to work_struct containing our data
  4427. **/
  4428. static void ixgbe_watchdog_task(struct work_struct *work)
  4429. {
  4430. struct ixgbe_adapter *adapter = container_of(work,
  4431. struct ixgbe_adapter,
  4432. watchdog_task);
  4433. struct net_device *netdev = adapter->netdev;
  4434. struct ixgbe_hw *hw = &adapter->hw;
  4435. u32 link_speed;
  4436. bool link_up;
  4437. int i;
  4438. struct ixgbe_ring *tx_ring;
  4439. int some_tx_pending = 0;
  4440. mutex_lock(&ixgbe_watchdog_lock);
  4441. link_up = adapter->link_up;
  4442. link_speed = adapter->link_speed;
  4443. if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
  4444. hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
  4445. if (link_up) {
  4446. #ifdef CONFIG_DCB
  4447. if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
  4448. for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
  4449. hw->mac.ops.fc_enable(hw, i);
  4450. } else {
  4451. hw->mac.ops.fc_enable(hw, 0);
  4452. }
  4453. #else
  4454. hw->mac.ops.fc_enable(hw, 0);
  4455. #endif
  4456. }
  4457. if (link_up ||
  4458. time_after(jiffies, (adapter->link_check_timeout +
  4459. IXGBE_TRY_LINK_TIMEOUT))) {
  4460. adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
  4461. IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
  4462. }
  4463. adapter->link_up = link_up;
  4464. adapter->link_speed = link_speed;
  4465. }
  4466. if (link_up) {
  4467. if (!netif_carrier_ok(netdev)) {
  4468. bool flow_rx, flow_tx;
  4469. if (hw->mac.type == ixgbe_mac_82599EB) {
  4470. u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
  4471. u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
  4472. flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
  4473. flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
  4474. } else {
  4475. u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  4476. u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
  4477. flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
  4478. flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
  4479. }
  4480. printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
  4481. "Flow Control: %s\n",
  4482. netdev->name,
  4483. (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
  4484. "10 Gbps" :
  4485. (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
  4486. "1 Gbps" : "unknown speed")),
  4487. ((flow_rx && flow_tx) ? "RX/TX" :
  4488. (flow_rx ? "RX" :
  4489. (flow_tx ? "TX" : "None"))));
  4490. netif_carrier_on(netdev);
  4491. } else {
  4492. /* Force detection of hung controller */
  4493. adapter->detect_tx_hung = true;
  4494. }
  4495. } else {
  4496. adapter->link_up = false;
  4497. adapter->link_speed = 0;
  4498. if (netif_carrier_ok(netdev)) {
  4499. printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
  4500. netdev->name);
  4501. netif_carrier_off(netdev);
  4502. }
  4503. }
  4504. if (!netif_carrier_ok(netdev)) {
  4505. for (i = 0; i < adapter->num_tx_queues; i++) {
  4506. tx_ring = adapter->tx_ring[i];
  4507. if (tx_ring->next_to_use != tx_ring->next_to_clean) {
  4508. some_tx_pending = 1;
  4509. break;
  4510. }
  4511. }
  4512. if (some_tx_pending) {
  4513. /* We've lost link, so the controller stops DMA,
  4514. * but we've got queued Tx work that's never going
  4515. * to get done, so reset controller to flush Tx.
  4516. * (Do the reset outside of interrupt context).
  4517. */
  4518. schedule_work(&adapter->reset_task);
  4519. }
  4520. }
  4521. ixgbe_update_stats(adapter);
  4522. mutex_unlock(&ixgbe_watchdog_lock);
  4523. }
  4524. static int ixgbe_tso(struct ixgbe_adapter *adapter,
  4525. struct ixgbe_ring *tx_ring, struct sk_buff *skb,
  4526. u32 tx_flags, u8 *hdr_len)
  4527. {
  4528. struct ixgbe_adv_tx_context_desc *context_desc;
  4529. unsigned int i;
  4530. int err;
  4531. struct ixgbe_tx_buffer *tx_buffer_info;
  4532. u32 vlan_macip_lens = 0, type_tucmd_mlhl;
  4533. u32 mss_l4len_idx, l4len;
  4534. if (skb_is_gso(skb)) {
  4535. if (skb_header_cloned(skb)) {
  4536. err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  4537. if (err)
  4538. return err;
  4539. }
  4540. l4len = tcp_hdrlen(skb);
  4541. *hdr_len += l4len;
  4542. if (skb->protocol == htons(ETH_P_IP)) {
  4543. struct iphdr *iph = ip_hdr(skb);
  4544. iph->tot_len = 0;
  4545. iph->check = 0;
  4546. tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
  4547. iph->daddr, 0,
  4548. IPPROTO_TCP,
  4549. 0);
  4550. } else if (skb_is_gso_v6(skb)) {
  4551. ipv6_hdr(skb)->payload_len = 0;
  4552. tcp_hdr(skb)->check =
  4553. ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
  4554. &ipv6_hdr(skb)->daddr,
  4555. 0, IPPROTO_TCP, 0);
  4556. }
  4557. i = tx_ring->next_to_use;
  4558. tx_buffer_info = &tx_ring->tx_buffer_info[i];
  4559. context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
  4560. /* VLAN MACLEN IPLEN */
  4561. if (tx_flags & IXGBE_TX_FLAGS_VLAN)
  4562. vlan_macip_lens |=
  4563. (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
  4564. vlan_macip_lens |= ((skb_network_offset(skb)) <<
  4565. IXGBE_ADVTXD_MACLEN_SHIFT);
  4566. *hdr_len += skb_network_offset(skb);
  4567. vlan_macip_lens |=
  4568. (skb_transport_header(skb) - skb_network_header(skb));
  4569. *hdr_len +=
  4570. (skb_transport_header(skb) - skb_network_header(skb));
  4571. context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
  4572. context_desc->seqnum_seed = 0;
  4573. /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
  4574. type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
  4575. IXGBE_ADVTXD_DTYP_CTXT);
  4576. if (skb->protocol == htons(ETH_P_IP))
  4577. type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
  4578. type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
  4579. context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
  4580. /* MSS L4LEN IDX */
  4581. mss_l4len_idx =
  4582. (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
  4583. mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
  4584. /* use index 1 for TSO */
  4585. mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
  4586. context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
  4587. tx_buffer_info->time_stamp = jiffies;
  4588. tx_buffer_info->next_to_watch = i;
  4589. i++;
  4590. if (i == tx_ring->count)
  4591. i = 0;
  4592. tx_ring->next_to_use = i;
  4593. return true;
  4594. }
  4595. return false;
  4596. }
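/*
 * Illustrative sketch (not part of the upstream driver): how the MSS/L4LEN/IDX
 * word of the advanced context descriptor is packed in ixgbe_tso() above.
 * The MSS and TCP header length occupy their shifted fields, and context
 * index 1 is reserved for TSO (index 0 is used for plain checksum offload).
 * The helper name is hypothetical.
 */
static inline u32 ixgbe_sketch_mss_l4len_idx(u32 mss, u32 l4len)
{
        u32 val = 0;

        val |= mss << IXGBE_ADVTXD_MSS_SHIFT;           /* maximum segment size */
        val |= l4len << IXGBE_ADVTXD_L4LEN_SHIFT;       /* TCP header length */
        val |= 1 << IXGBE_ADVTXD_IDX_SHIFT;             /* context index 1 for TSO */
        return val;
}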
  4597. static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
  4598. struct ixgbe_ring *tx_ring,
  4599. struct sk_buff *skb, u32 tx_flags)
  4600. {
  4601. struct ixgbe_adv_tx_context_desc *context_desc;
  4602. unsigned int i;
  4603. struct ixgbe_tx_buffer *tx_buffer_info;
  4604. u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
  4605. if (skb->ip_summed == CHECKSUM_PARTIAL ||
  4606. (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
  4607. i = tx_ring->next_to_use;
  4608. tx_buffer_info = &tx_ring->tx_buffer_info[i];
  4609. context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
  4610. if (tx_flags & IXGBE_TX_FLAGS_VLAN)
  4611. vlan_macip_lens |=
  4612. (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
  4613. vlan_macip_lens |= (skb_network_offset(skb) <<
  4614. IXGBE_ADVTXD_MACLEN_SHIFT);
  4615. if (skb->ip_summed == CHECKSUM_PARTIAL)
  4616. vlan_macip_lens |= (skb_transport_header(skb) -
  4617. skb_network_header(skb));
  4618. context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
  4619. context_desc->seqnum_seed = 0;
  4620. type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
  4621. IXGBE_ADVTXD_DTYP_CTXT);
  4622. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  4623. __be16 protocol;
  4624. if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
  4625. const struct vlan_ethhdr *vhdr =
  4626. (const struct vlan_ethhdr *)skb->data;
  4627. protocol = vhdr->h_vlan_encapsulated_proto;
  4628. } else {
  4629. protocol = skb->protocol;
  4630. }
  4631. switch (protocol) {
  4632. case cpu_to_be16(ETH_P_IP):
  4633. type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
  4634. if (ip_hdr(skb)->protocol == IPPROTO_TCP)
  4635. type_tucmd_mlhl |=
  4636. IXGBE_ADVTXD_TUCMD_L4T_TCP;
  4637. else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
  4638. type_tucmd_mlhl |=
  4639. IXGBE_ADVTXD_TUCMD_L4T_SCTP;
  4640. break;
  4641. case cpu_to_be16(ETH_P_IPV6):
  4642. /* XXX what about other V6 headers?? */
  4643. if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
  4644. type_tucmd_mlhl |=
  4645. IXGBE_ADVTXD_TUCMD_L4T_TCP;
  4646. else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
  4647. type_tucmd_mlhl |=
  4648. IXGBE_ADVTXD_TUCMD_L4T_SCTP;
  4649. break;
  4650. default:
  4651. if (unlikely(net_ratelimit())) {
  4652. DPRINTK(PROBE, WARNING,
  4653. "partial checksum but proto=%x!\n",
  4654. skb->protocol);
  4655. }
  4656. break;
  4657. }
  4658. }
  4659. context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
  4660. /* use index zero for tx checksum offload */
  4661. context_desc->mss_l4len_idx = 0;
  4662. tx_buffer_info->time_stamp = jiffies;
  4663. tx_buffer_info->next_to_watch = i;
  4664. i++;
  4665. if (i == tx_ring->count)
  4666. i = 0;
  4667. tx_ring->next_to_use = i;
  4668. return true;
  4669. }
  4670. return false;
  4671. }
  4672. static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
  4673. struct ixgbe_ring *tx_ring,
  4674. struct sk_buff *skb, u32 tx_flags,
  4675. unsigned int first)
  4676. {
  4677. struct pci_dev *pdev = adapter->pdev;
  4678. struct ixgbe_tx_buffer *tx_buffer_info;
  4679. unsigned int len;
  4680. unsigned int total = skb->len;
  4681. unsigned int offset = 0, size, count = 0, i;
  4682. unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
  4683. unsigned int f;
  4684. i = tx_ring->next_to_use;
  4685. if (tx_flags & IXGBE_TX_FLAGS_FCOE)
  4686. /* excluding fcoe_crc_eof for FCoE */
  4687. total -= sizeof(struct fcoe_crc_eof);
  4688. len = min(skb_headlen(skb), total);
  4689. while (len) {
  4690. tx_buffer_info = &tx_ring->tx_buffer_info[i];
  4691. size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
  4692. tx_buffer_info->length = size;
  4693. tx_buffer_info->mapped_as_page = false;
  4694. tx_buffer_info->dma = pci_map_single(pdev,
  4695. skb->data + offset,
  4696. size, PCI_DMA_TODEVICE);
  4697. if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
  4698. goto dma_error;
  4699. tx_buffer_info->time_stamp = jiffies;
  4700. tx_buffer_info->next_to_watch = i;
  4701. len -= size;
  4702. total -= size;
  4703. offset += size;
  4704. count++;
  4705. if (len) {
  4706. i++;
  4707. if (i == tx_ring->count)
  4708. i = 0;
  4709. }
  4710. }
  4711. for (f = 0; f < nr_frags; f++) {
  4712. struct skb_frag_struct *frag;
  4713. frag = &skb_shinfo(skb)->frags[f];
  4714. len = min((unsigned int)frag->size, total);
  4715. offset = frag->page_offset;
  4716. while (len) {
  4717. i++;
  4718. if (i == tx_ring->count)
  4719. i = 0;
  4720. tx_buffer_info = &tx_ring->tx_buffer_info[i];
  4721. size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
  4722. tx_buffer_info->length = size;
  4723. tx_buffer_info->dma = pci_map_page(adapter->pdev,
  4724. frag->page,
  4725. offset, size,
  4726. PCI_DMA_TODEVICE);
  4727. tx_buffer_info->mapped_as_page = true;
  4728. if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
  4729. goto dma_error;
  4730. tx_buffer_info->time_stamp = jiffies;
  4731. tx_buffer_info->next_to_watch = i;
  4732. len -= size;
  4733. total -= size;
  4734. offset += size;
  4735. count++;
  4736. }
  4737. if (total == 0)
  4738. break;
  4739. }
  4740. tx_ring->tx_buffer_info[i].skb = skb;
  4741. tx_ring->tx_buffer_info[first].next_to_watch = i;
  4742. return count;
  4743. dma_error:
  4744. dev_err(&pdev->dev, "TX DMA map failed\n");
  4745. /* clear timestamp and dma mappings for failed tx_buffer_info map */
  4746. tx_buffer_info->dma = 0;
  4747. tx_buffer_info->time_stamp = 0;
  4748. tx_buffer_info->next_to_watch = 0;
  4749. if (count)
  4750. count--;
  4751. /* clear timestamp and dma mappings for remaining portion of packet */
  4752. while (count--) {
4753. if (i == 0)
  4754. i += tx_ring->count;
  4755. i--;
  4756. tx_buffer_info = &tx_ring->tx_buffer_info[i];
  4757. ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
  4758. }
  4759. return 0;
  4760. }
  4761. static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
  4762. struct ixgbe_ring *tx_ring,
  4763. int tx_flags, int count, u32 paylen, u8 hdr_len)
  4764. {
  4765. union ixgbe_adv_tx_desc *tx_desc = NULL;
  4766. struct ixgbe_tx_buffer *tx_buffer_info;
  4767. u32 olinfo_status = 0, cmd_type_len = 0;
  4768. unsigned int i;
  4769. u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
  4770. cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
  4771. cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
  4772. if (tx_flags & IXGBE_TX_FLAGS_VLAN)
  4773. cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
  4774. if (tx_flags & IXGBE_TX_FLAGS_TSO) {
  4775. cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
  4776. olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
  4777. IXGBE_ADVTXD_POPTS_SHIFT;
  4778. /* use index 1 context for tso */
  4779. olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
  4780. if (tx_flags & IXGBE_TX_FLAGS_IPV4)
  4781. olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
  4782. IXGBE_ADVTXD_POPTS_SHIFT;
  4783. } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
  4784. olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
  4785. IXGBE_ADVTXD_POPTS_SHIFT;
  4786. if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
  4787. olinfo_status |= IXGBE_ADVTXD_CC;
  4788. olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
  4789. if (tx_flags & IXGBE_TX_FLAGS_FSO)
  4790. cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
  4791. }
  4792. olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
  4793. i = tx_ring->next_to_use;
  4794. while (count--) {
  4795. tx_buffer_info = &tx_ring->tx_buffer_info[i];
  4796. tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
  4797. tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
  4798. tx_desc->read.cmd_type_len =
  4799. cpu_to_le32(cmd_type_len | tx_buffer_info->length);
  4800. tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
  4801. i++;
  4802. if (i == tx_ring->count)
  4803. i = 0;
  4804. }
  4805. tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
  4806. /*
  4807. * Force memory writes to complete before letting h/w
  4808. * know there are new descriptors to fetch. (Only
  4809. * applicable for weak-ordered memory model archs,
  4810. * such as IA-64).
  4811. */
  4812. wmb();
  4813. tx_ring->next_to_use = i;
  4814. writel(i, adapter->hw.hw_addr + tx_ring->tail);
  4815. }
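/*
 * Illustrative sketch (not part of the upstream driver): the producer-side
 * ordering at the end of ixgbe_tx_queue() above.  Descriptor writes must be
 * globally visible before the tail register tells the hardware to fetch
 * them, hence the wmb() before writel().  The helper name is hypothetical.
 */
static inline void ixgbe_sketch_bump_tail(struct ixgbe_adapter *adapter,
                                          struct ixgbe_ring *tx_ring, u32 next)
{
        wmb();                          /* flush descriptor writes first */
        tx_ring->next_to_use = next;
        writel(next, adapter->hw.hw_addr + tx_ring->tail);
}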
  4816. static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
  4817. int queue, u32 tx_flags)
  4818. {
  4819. /* Right now, we support IPv4 only */
  4820. struct ixgbe_atr_input atr_input;
  4821. struct tcphdr *th;
  4822. struct iphdr *iph = ip_hdr(skb);
  4823. struct ethhdr *eth = (struct ethhdr *)skb->data;
  4824. u16 vlan_id, src_port, dst_port, flex_bytes;
  4825. u32 src_ipv4_addr, dst_ipv4_addr;
  4826. u8 l4type = 0;
  4827. /* check if we're UDP or TCP */
  4828. if (iph->protocol == IPPROTO_TCP) {
  4829. th = tcp_hdr(skb);
  4830. src_port = th->source;
  4831. dst_port = th->dest;
  4832. l4type |= IXGBE_ATR_L4TYPE_TCP;
  4833. /* l4type IPv4 type is 0, no need to assign */
  4834. } else {
  4835. /* Unsupported L4 header, just bail here */
  4836. return;
  4837. }
  4838. memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
  4839. vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
  4840. IXGBE_TX_FLAGS_VLAN_SHIFT;
  4841. src_ipv4_addr = iph->saddr;
  4842. dst_ipv4_addr = iph->daddr;
  4843. flex_bytes = eth->h_proto;
  4844. ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
  4845. ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
  4846. ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
  4847. ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
  4848. ixgbe_atr_set_l4type_82599(&atr_input, l4type);
  4849. /* src and dst are inverted, think how the receiver sees them */
  4850. ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
  4851. ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
  4852. /* This assumes the Rx queue and Tx queue are bound to the same CPU */
  4853. ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
  4854. }
  4855. static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
  4856. struct ixgbe_ring *tx_ring, int size)
  4857. {
  4858. netif_stop_subqueue(netdev, tx_ring->queue_index);
  4859. /* Herbert's original patch had:
  4860. * smp_mb__after_netif_stop_queue();
  4861. * but since that doesn't exist yet, just open code it. */
  4862. smp_mb();
  4863. /* We need to check again in a case another CPU has just
  4864. * made room available. */
  4865. if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
  4866. return -EBUSY;
  4867. /* A reprieve! - use start_queue because it doesn't call schedule */
  4868. netif_start_subqueue(netdev, tx_ring->queue_index);
  4869. ++tx_ring->restart_queue;
  4870. return 0;
  4871. }
  4872. static int ixgbe_maybe_stop_tx(struct net_device *netdev,
  4873. struct ixgbe_ring *tx_ring, int size)
  4874. {
  4875. if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
  4876. return 0;
  4877. return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
  4878. }
  4879. static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
  4880. {
  4881. struct ixgbe_adapter *adapter = netdev_priv(dev);
  4882. int txq = smp_processor_id();
  4883. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
  4884. while (unlikely(txq >= dev->real_num_tx_queues))
  4885. txq -= dev->real_num_tx_queues;
  4886. return txq;
  4887. }
  4888. #ifdef IXGBE_FCOE
  4889. if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
  4890. (skb->protocol == htons(ETH_P_FCOE))) {
  4891. txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
  4892. txq += adapter->ring_feature[RING_F_FCOE].mask;
  4893. return txq;
  4894. }
  4895. #endif
  4896. if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
  4897. if (skb->priority == TC_PRIO_CONTROL)
  4898. txq = adapter->ring_feature[RING_F_DCB].indices-1;
  4899. else
  4900. txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
  4901. >> 13;
  4902. return txq;
  4903. }
  4904. return skb_tx_hash(dev, skb);
  4905. }
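/*
 * Illustrative sketch (not part of the upstream driver): the wrap-around
 * used above when Flow Director hashing is enabled.  Starting from the
 * current CPU id, repeatedly subtracting real_num_tx_queues is equivalent
 * to a modulo and keeps the chosen queue in range without a divide.  The
 * helper name is hypothetical.
 */
static inline u16 ixgbe_sketch_wrap_txq(int txq, int nqueues)
{
        while (unlikely(txq >= nqueues))
                txq -= nqueues;
        return (u16)txq;
}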
  4906. static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
  4907. struct net_device *netdev)
  4908. {
  4909. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4910. struct ixgbe_ring *tx_ring;
  4911. struct netdev_queue *txq;
  4912. unsigned int first;
  4913. unsigned int tx_flags = 0;
  4914. u8 hdr_len = 0;
  4915. int tso;
  4916. int count = 0;
  4917. unsigned int f;
  4918. if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
  4919. tx_flags |= vlan_tx_tag_get(skb);
  4920. if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
  4921. tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
  4922. tx_flags |= ((skb->queue_mapping & 0x7) << 13);
  4923. }
  4924. tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
  4925. tx_flags |= IXGBE_TX_FLAGS_VLAN;
  4926. } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
  4927. tx_flags |= ((skb->queue_mapping & 0x7) << 13);
  4928. tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
  4929. tx_flags |= IXGBE_TX_FLAGS_VLAN;
  4930. }
  4931. tx_ring = adapter->tx_ring[skb->queue_mapping];
  4932. if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
  4933. (skb->protocol == htons(ETH_P_FCOE))) {
  4934. tx_flags |= IXGBE_TX_FLAGS_FCOE;
  4935. #ifdef IXGBE_FCOE
  4936. #ifdef CONFIG_IXGBE_DCB
  4937. tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
  4938. << IXGBE_TX_FLAGS_VLAN_SHIFT);
  4939. tx_flags |= ((adapter->fcoe.up << 13)
  4940. << IXGBE_TX_FLAGS_VLAN_SHIFT);
  4941. #endif
  4942. #endif
  4943. }
  4944. /* four things can cause us to need a context descriptor */
  4945. if (skb_is_gso(skb) ||
  4946. (skb->ip_summed == CHECKSUM_PARTIAL) ||
  4947. (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
  4948. (tx_flags & IXGBE_TX_FLAGS_FCOE))
  4949. count++;
  4950. count += TXD_USE_COUNT(skb_headlen(skb));
  4951. for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
  4952. count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
  4953. if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
  4954. adapter->tx_busy++;
  4955. return NETDEV_TX_BUSY;
  4956. }
  4957. first = tx_ring->next_to_use;
  4958. if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
  4959. #ifdef IXGBE_FCOE
  4960. /* setup tx offload for FCoE */
  4961. tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
  4962. if (tso < 0) {
  4963. dev_kfree_skb_any(skb);
  4964. return NETDEV_TX_OK;
  4965. }
  4966. if (tso)
  4967. tx_flags |= IXGBE_TX_FLAGS_FSO;
  4968. #endif /* IXGBE_FCOE */
  4969. } else {
  4970. if (skb->protocol == htons(ETH_P_IP))
  4971. tx_flags |= IXGBE_TX_FLAGS_IPV4;
  4972. tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
  4973. if (tso < 0) {
  4974. dev_kfree_skb_any(skb);
  4975. return NETDEV_TX_OK;
  4976. }
  4977. if (tso)
  4978. tx_flags |= IXGBE_TX_FLAGS_TSO;
  4979. else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
  4980. (skb->ip_summed == CHECKSUM_PARTIAL))
  4981. tx_flags |= IXGBE_TX_FLAGS_CSUM;
  4982. }
  4983. count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
  4984. if (count) {
  4985. /* add the ATR filter if ATR is on */
  4986. if (tx_ring->atr_sample_rate) {
  4987. ++tx_ring->atr_count;
  4988. if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
  4989. test_bit(__IXGBE_FDIR_INIT_DONE,
  4990. &tx_ring->reinit_state)) {
  4991. ixgbe_atr(adapter, skb, tx_ring->queue_index,
  4992. tx_flags);
  4993. tx_ring->atr_count = 0;
  4994. }
  4995. }
  4996. txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
  4997. txq->tx_bytes += skb->len;
  4998. txq->tx_packets++;
  4999. ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
  5000. hdr_len);
  5001. ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
  5002. } else {
  5003. dev_kfree_skb_any(skb);
  5004. tx_ring->tx_buffer_info[first].time_stamp = 0;
  5005. tx_ring->next_to_use = first;
  5006. }
  5007. return NETDEV_TX_OK;
  5008. }
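/*
 * Illustrative sketch (not part of the upstream driver): the worst-case
 * descriptor count computed near the top of ixgbe_xmit_frame() above.  One
 * optional context descriptor plus TXD_USE_COUNT() data descriptors for the
 * linear part and for each page fragment.  The helper name is hypothetical.
 */
static inline unsigned int ixgbe_sketch_desc_needed(struct sk_buff *skb,
                                                    bool needs_ctxt)
{
        unsigned int count = needs_ctxt ? 1 : 0;
        unsigned int f;

        count += TXD_USE_COUNT(skb_headlen(skb));
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

        return count;
}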
  5009. /**
  5010. * ixgbe_set_mac - Change the Ethernet Address of the NIC
  5011. * @netdev: network interface device structure
  5012. * @p: pointer to an address structure
  5013. *
  5014. * Returns 0 on success, negative on failure
  5015. **/
  5016. static int ixgbe_set_mac(struct net_device *netdev, void *p)
  5017. {
  5018. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5019. struct ixgbe_hw *hw = &adapter->hw;
  5020. struct sockaddr *addr = p;
  5021. if (!is_valid_ether_addr(addr->sa_data))
  5022. return -EADDRNOTAVAIL;
  5023. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  5024. memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
  5025. hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
  5026. IXGBE_RAH_AV);
  5027. return 0;
  5028. }
  5029. static int
  5030. ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
  5031. {
  5032. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5033. struct ixgbe_hw *hw = &adapter->hw;
  5034. u16 value;
  5035. int rc;
  5036. if (prtad != hw->phy.mdio.prtad)
  5037. return -EINVAL;
  5038. rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
  5039. if (!rc)
  5040. rc = value;
  5041. return rc;
  5042. }
  5043. static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
  5044. u16 addr, u16 value)
  5045. {
  5046. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5047. struct ixgbe_hw *hw = &adapter->hw;
  5048. if (prtad != hw->phy.mdio.prtad)
  5049. return -EINVAL;
  5050. return hw->phy.ops.write_reg(hw, addr, devad, value);
  5051. }
  5052. static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
  5053. {
  5054. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5055. return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
  5056. }
  5057. /**
  5058. * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
  5059. * netdev->dev_addrs
5060. * @dev: network interface device structure
  5061. *
  5062. * Returns non-zero on failure
  5063. **/
  5064. static int ixgbe_add_sanmac_netdev(struct net_device *dev)
  5065. {
  5066. int err = 0;
  5067. struct ixgbe_adapter *adapter = netdev_priv(dev);
  5068. struct ixgbe_mac_info *mac = &adapter->hw.mac;
  5069. if (is_valid_ether_addr(mac->san_addr)) {
  5070. rtnl_lock();
  5071. err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
  5072. rtnl_unlock();
  5073. }
  5074. return err;
  5075. }
  5076. /**
5077. * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
  5078. * netdev->dev_addrs
  5079. * @netdev: network interface device structure
  5080. *
  5081. * Returns non-zero on failure
  5082. **/
  5083. static int ixgbe_del_sanmac_netdev(struct net_device *dev)
  5084. {
  5085. int err = 0;
  5086. struct ixgbe_adapter *adapter = netdev_priv(dev);
  5087. struct ixgbe_mac_info *mac = &adapter->hw.mac;
  5088. if (is_valid_ether_addr(mac->san_addr)) {
  5089. rtnl_lock();
  5090. err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
  5091. rtnl_unlock();
  5092. }
  5093. return err;
  5094. }
  5095. #ifdef CONFIG_NET_POLL_CONTROLLER
  5096. /*
  5097. * Polling 'interrupt' - used by things like netconsole to send skbs
  5098. * without having to re-enable interrupts. It's not called while
  5099. * the interrupt routine is executing.
  5100. */
  5101. static void ixgbe_netpoll(struct net_device *netdev)
  5102. {
  5103. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5104. int i;
  5105. /* if interface is down do nothing */
  5106. if (test_bit(__IXGBE_DOWN, &adapter->state))
  5107. return;
  5108. adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
  5109. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  5110. int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  5111. for (i = 0; i < num_q_vectors; i++) {
  5112. struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
  5113. ixgbe_msix_clean_many(0, q_vector);
  5114. }
  5115. } else {
  5116. ixgbe_intr(adapter->pdev->irq, netdev);
  5117. }
  5118. adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
  5119. }
  5120. #endif
  5121. static const struct net_device_ops ixgbe_netdev_ops = {
  5122. .ndo_open = ixgbe_open,
  5123. .ndo_stop = ixgbe_close,
  5124. .ndo_start_xmit = ixgbe_xmit_frame,
  5125. .ndo_select_queue = ixgbe_select_queue,
  5126. .ndo_set_rx_mode = ixgbe_set_rx_mode,
  5127. .ndo_set_multicast_list = ixgbe_set_rx_mode,
  5128. .ndo_validate_addr = eth_validate_addr,
  5129. .ndo_set_mac_address = ixgbe_set_mac,
  5130. .ndo_change_mtu = ixgbe_change_mtu,
  5131. .ndo_tx_timeout = ixgbe_tx_timeout,
  5132. .ndo_vlan_rx_register = ixgbe_vlan_rx_register,
  5133. .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
  5134. .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
  5135. .ndo_do_ioctl = ixgbe_ioctl,
  5136. #ifdef CONFIG_NET_POLL_CONTROLLER
  5137. .ndo_poll_controller = ixgbe_netpoll,
  5138. #endif
  5139. #ifdef IXGBE_FCOE
  5140. .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
  5141. .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
  5142. .ndo_fcoe_enable = ixgbe_fcoe_enable,
  5143. .ndo_fcoe_disable = ixgbe_fcoe_disable,
  5144. .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
  5145. #endif /* IXGBE_FCOE */
  5146. };
  5147. static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
  5148. const struct ixgbe_info *ii)
  5149. {
  5150. #ifdef CONFIG_PCI_IOV
  5151. struct ixgbe_hw *hw = &adapter->hw;
  5152. int err;
  5153. if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
  5154. return;
  5155. /* The 82599 supports up to 64 VFs per physical function
  5156. * but this implementation limits allocation to 63 so that
  5157. * basic networking resources are still available to the
  5158. * physical function
  5159. */
  5160. adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
  5161. adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
  5162. err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
  5163. if (err) {
  5164. DPRINTK(PROBE, ERR,
  5165. "Failed to enable PCI sriov: %d\n", err);
  5166. goto err_novfs;
  5167. }
  5168. /* If call to enable VFs succeeded then allocate memory
  5169. * for per VF control structures.
  5170. */
  5171. adapter->vfinfo =
  5172. kcalloc(adapter->num_vfs,
  5173. sizeof(struct vf_data_storage), GFP_KERNEL);
  5174. if (adapter->vfinfo) {
  5175. /* Now that we're sure SR-IOV is enabled
  5176. * and memory allocated set up the mailbox parameters
  5177. */
  5178. ixgbe_init_mbx_params_pf(hw);
  5179. memcpy(&hw->mbx.ops, ii->mbx_ops,
  5180. sizeof(hw->mbx.ops));
  5181. /* Disable RSC when in SR-IOV mode */
  5182. adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
  5183. IXGBE_FLAG2_RSC_ENABLED);
  5184. return;
  5185. }
  5186. /* Oh oh */
  5187. DPRINTK(PROBE, ERR,
  5188. "Unable to allocate memory for VF "
  5189. "Data Storage - SRIOV disabled\n");
  5190. pci_disable_sriov(adapter->pdev);
  5191. err_novfs:
  5192. adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
  5193. adapter->num_vfs = 0;
  5194. #endif /* CONFIG_PCI_IOV */
  5195. }
  5196. /**
  5197. * ixgbe_probe - Device Initialization Routine
  5198. * @pdev: PCI device information struct
  5199. * @ent: entry in ixgbe_pci_tbl
  5200. *
  5201. * Returns 0 on success, negative on failure
  5202. *
  5203. * ixgbe_probe initializes an adapter identified by a pci_dev structure.
  5204. * The OS initialization, configuring of the adapter private structure,
  5205. * and a hardware reset occur.
  5206. **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
	unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

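	/*
	 * Prefer a 64-bit DMA mask and fall back to 32-bit if the platform
	 * cannot provide it; pci_using_dac records which one succeeded so
	 * that NETIF_F_HIGHDMA can be advertised later.
	 */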
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

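	/*
	 * Size the netdev for the largest number of queues this MAC might
	 * use: bounded by the possible CPU count, the per-MAC RSS or Flow
	 * Director limits, the DCB minimum, optional FCoE queues, and the
	 * absolute MAX_TX_QUEUES ceiling.
	 */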
	if (ii->mac == ixgbe_mac_82598EB)
		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
	else
		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);

	indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
#ifdef IXGBE_FCOE
	indices += min_t(unsigned int, num_possible_cpus(),
			 IXGBE_MAX_FCOE_INDICES);
#endif
	indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}
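	/*
	 * Note: the loop above has no observable effect; BAR 0 (mapped
	 * just before it) is the only BAR this function actually uses.
	 */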

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = &ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* a new SFP+ module arrival, called from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
		  ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Make it possible for the adapter to be woken up via WOL */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	/*
	 * If there is a fan on this device and it has failed, log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			DPRINTK(PROBE, CRIT,
				"Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Start a kernel thread to watch for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
			  round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
			"an unsupported SFP+ module type was detected.\n"
			"Reload the driver after installing a supported "
			"module.\n");
		goto err_sw_init;
	} else if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}
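
	/*
	 * Try to set up SR-IOV: ixgbe_probe_vf() is a no-op unless
	 * CONFIG_PCI_IOV is enabled, the MAC is an 82599, and the
	 * max_vfs module parameter is non-zero.
	 */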
	ixgbe_probe_vf(adapter, ii);

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;
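
	/*
	 * These modes are mutually constrained: enabling SR-IOV turns off
	 * both RSS and DCB, and enabling DCB turns off RSS.
	 */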
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
				    IXGBE_FLAG_DCB_ENABLED);
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		/* Enable ACPI wakeup in GRC */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
				(IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
		break;
	default:
		adapter->wol = 0;
		break;
	}
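	/* Tell the PM core whether this device may wake the system,
	 * based on the WoL setting chosen above.
	 */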
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
		 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s" :
		  (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s" :
		  "Unknown"),
		 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
		  (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
		  "Unknown"),
		 netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
			 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			 (part_num >> 8), (part_num & 0xff));
	else
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
			 hw->mac.type, hw->phy.type,
			 (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
			 "this card is not sufficient for optimal "
			 "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
			 "PCI-Express slot is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		dev_warn(&pdev->dev, "This device is a pre-production "
			 "adapter/LOM. Please be aware there may be issues "
			 "associated with your hardware. If you are "
			 "experiencing problems please contact your Intel or "
			 "hardware representative who provided you with this "
			 "hardware.\n");
	}

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
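
	/*
	 * Run each VF through its initial configuration.  The value passed
	 * is an event mask: the low bits carry the VF index, and bit 28
	 * (0x10000000) appears to flag that the VF is being enabled (see
	 * ixgbe_vf_configuration() in the SR-IOV code).
	 */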
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
			adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);

	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);
#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	DPRINTK(PROBE, INFO, "complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		DPRINTK(PROBE, ERR,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);
	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IXGBE_DCA */

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev->name;
}
#endif

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */