/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk printk
#else
#define dprintk(x...) do { } while (0)
#endif

#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64

/*
 * Hardware access:
 */
#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
enum {
        NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
        NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
        NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
        NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
        NvRegMSIMap0 = 0x020,
        NvRegMSIMap1 = 0x024,
        NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
        NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
        NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
        NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
        NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
        NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10
        NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
        NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
        NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
        NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff
        NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
        NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
        NvRegMacAddrA = 0xA8,
        NvRegMacAddrB = 0xAC,
        NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
        NvRegMulticastAddrB = 0xB4,
        NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
        NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff
        NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
        NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12
        NvRegTxRingPhysAddr = 0x100,
        NvRegRxRingPhysAddr = 0x104,
        NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
        NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
        NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
        NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
        NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
        NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
        NvRegTxRingPhysAddrHigh = 0x148,
        NvRegRxRingPhysAddrHigh = 0x14C,
        NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
        NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
        NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
        NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008
        NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
        NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
        NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
        NvRegMIIData = 0x194,
        NvRegTxUnicast = 0x1a0,
        NvRegTxMulticast = 0x1a4,
        NvRegTxBroadcast = 0x1a8,
        NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
        NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
        NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
        NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
        NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
        NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
        NvRegTxCnt = 0x280,
        NvRegTxZeroReXmt = 0x284,
        NvRegTxOneReXmt = 0x288,
        NvRegTxManyReXmt = 0x28c,
        NvRegTxLateCol = 0x290,
        NvRegTxUnderflow = 0x294,
        NvRegTxLossCarrier = 0x298,
        NvRegTxExcessDef = 0x29c,
        NvRegTxRetryErr = 0x2a0,
        NvRegRxFrameErr = 0x2a4,
        NvRegRxExtraByte = 0x2a8,
        NvRegRxLateCol = 0x2ac,
        NvRegRxRunt = 0x2b0,
        NvRegRxFrameTooLong = 0x2b4,
        NvRegRxOverflow = 0x2b8,
        NvRegRxFCSErr = 0x2bc,
        NvRegRxFrameAlignErr = 0x2c0,
        NvRegRxLenErr = 0x2c4,
        NvRegRxUnicast = 0x2c8,
        NvRegRxMulticast = 0x2cc,
        NvRegRxBroadcast = 0x2d0,
        NvRegTxDef = 0x2d4,
        NvRegTxFrame = 0x2d8,
        NvRegRxCnt = 0x2dc,
        NvRegTxPause = 0x2e0,
        NvRegRxPause = 0x2e4,
        NvRegRxDropFrame = 0x2e8,
        NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
        NvRegMSIXMap0 = 0x3e0,
        NvRegMSIXMap1 = 0x3e4,
        NvRegMSIXIrqStatus = 0x3f0,
        NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};
/* Big endian: should work, but is untested */
struct ring_desc {
        __le32 buf;
        __le32 flaglen;
};

struct ring_desc_ex {
        __le32 bufhigh;
        __le32 buflow;
        __le32 txvlan;
        __le32 flaglen;
};

union ring_type {
        struct ring_desc *orig;
        struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
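/*
 * Illustrative expansion (editorial note, not from the original source):
 * the XOR above simply inverts the flag mask, so LEN_MASK_V1 works out to
 * 0x0000ffff and LEN_MASK_V2 to 0x00003fff. The low bits of ->flaglen
 * carry the buffer/packet length; the high bits carry the NV_TX / NV_RX
 * flag bits defined below.
 */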
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003

#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080
#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048
/* statistics */
struct nv_ethtool_str {
        char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
        { "tx_bytes" },
        { "tx_zero_rexmt" },
        { "tx_one_rexmt" },
        { "tx_many_rexmt" },
        { "tx_late_collision" },
        { "tx_fifo_errors" },
        { "tx_carrier_errors" },
        { "tx_excess_deferral" },
        { "tx_retry_error" },
        { "rx_frame_error" },
        { "rx_extra_byte" },
        { "rx_late_collision" },
        { "rx_runt" },
        { "rx_frame_too_long" },
        { "rx_over_errors" },
        { "rx_crc_errors" },
        { "rx_frame_align_error" },
        { "rx_length_error" },
        { "rx_unicast" },
        { "rx_multicast" },
        { "rx_broadcast" },
        { "rx_packets" },
        { "rx_errors_total" },
        { "tx_errors_total" },

        /* version 2 stats */
        { "tx_deferral" },
        { "tx_packets" },
        { "rx_bytes" },
        { "tx_pause" },
        { "rx_pause" },
        { "rx_drop_frame" },

        /* version 3 stats */
        { "tx_unicast" },
        { "tx_multicast" },
        { "tx_broadcast" }
};
struct nv_ethtool_stats {
        u64 tx_bytes;
        u64 tx_zero_rexmt;
        u64 tx_one_rexmt;
        u64 tx_many_rexmt;
        u64 tx_late_collision;
        u64 tx_fifo_errors;
        u64 tx_carrier_errors;
        u64 tx_excess_deferral;
        u64 tx_retry_error;
        u64 rx_frame_error;
        u64 rx_extra_byte;
        u64 rx_late_collision;
        u64 rx_runt;
        u64 rx_frame_too_long;
        u64 rx_over_errors;
        u64 rx_crc_errors;
        u64 rx_frame_align_error;
        u64 rx_length_error;
        u64 rx_unicast;
        u64 rx_multicast;
        u64 rx_broadcast;
        u64 rx_packets;
        u64 rx_errors_total;
        u64 tx_errors_total;

        /* version 2 stats */
        u64 tx_deferral;
        u64 tx_packets;
        u64 rx_bytes;
        u64 tx_pause;
        u64 rx_pause;
        u64 rx_drop_frame;

        /* version 3 stats */
        u64 tx_unicast;
        u64 tx_multicast;
        u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
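/*
 * Sketch of the arithmetic above (editorial, derived from the struct):
 * struct nv_ethtool_stats holds 33 u64 fields, so V3_COUNT = 33,
 * V2_COUNT = 33 - 3 = 30 (dropping the three "version 3" tx counters),
 * and V1_COUNT = 30 - 6 = 24 (also dropping the six "version 2" fields).
 * These counts match the nv_estats_str[] name table above entry for entry.
 */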
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
        { "link (online/offline)" },
        { "register (offline) " },
        { "interrupt (offline) " },
        { "loopback (offline) " }
};

struct register_test {
        __u32 reg;
        __u32 mask;
};

static const struct register_test nv_registers_test[] = {
        { NvRegUnknownSetupReg6, 0x01 },
        { NvRegMisc1, 0x03c },
        { NvRegOffloadConfig, 0x03ff },
        { NvRegMulticastAddrA, 0xffffffff },
        { NvRegTxWatermark, 0x0ff },
        { NvRegWakeUpFlags, 0x07777 },
        { 0, 0 }
};
struct nv_skb_map {
        struct sk_buff *skb;
        dma_addr_t dma;
        unsigned int dma_len:31;
        unsigned int dma_single:1;
        struct ring_desc_ex *first_tx_desc;
        struct nv_skb_map *next_tx_ctx;
};
/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
        spinlock_t lock;

        struct net_device *dev;
        struct napi_struct napi;

        /* General data:
         * Locking: spin_lock(&np->lock); */
        struct nv_ethtool_stats estats;
        int in_shutdown;
        u32 linkspeed;
        int duplex;
        int autoneg;
        int fixed_mode;
        int phyaddr;
        int wolenabled;
        unsigned int phy_oui;
        unsigned int phy_model;
        unsigned int phy_rev;
        u16 gigabit;
        int intr_test;
        int recover_error;
        int quiet_count;

        /* General data: RO fields */
        dma_addr_t ring_addr;
        struct pci_dev *pci_dev;
        u32 orig_mac[2];
        u32 events;
        u32 irqmask;
        u32 desc_ver;
        u32 txrxctl_bits;
        u32 vlanctl_bits;
        u32 driver_data;
        u32 device_id;
        u32 register_size;
        int rx_csum;
        u32 mac_in_use;
        int mgmt_version;
        int mgmt_sema;
        void __iomem *base;
        /* rx specific fields.
         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
         */
        union ring_type get_rx, put_rx, first_rx, last_rx;
        struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
        struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
        struct nv_skb_map *rx_skb;

        union ring_type rx_ring;
        unsigned int rx_buf_sz;
        unsigned int pkt_limit;
        struct timer_list oom_kick;
        struct timer_list nic_poll;
        struct timer_list stats_poll;
        u32 nic_poll_irq;
        int rx_ring_size;

        /* media detection workaround.
         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
         */
        int need_linktimer;
        unsigned long link_timeout;
  734. int need_linktimer;
  735. unsigned long link_timeout;
  736. /*
  737. * tx specific fields.
  738. */
  739. union ring_type get_tx, put_tx, first_tx, last_tx;
  740. struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
  741. struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
  742. struct nv_skb_map *tx_skb;
  743. union ring_type tx_ring;
  744. u32 tx_flags;
  745. int tx_ring_size;
  746. int tx_limit;
  747. u32 tx_pkts_in_progress;
  748. struct nv_skb_map *tx_change_owner;
  749. struct nv_skb_map *tx_end_flip;
  750. int tx_stop;
  751. /* vlan fields */
  752. struct vlan_group *vlangrp;
  753. /* msi/msi-x fields */
  754. u32 msi_flags;
  755. struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
  756. /* flow control */
  757. u32 pause_flags;
  758. /* power saved state */
  759. u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
  760. /* for different msi-x irq type */
  761. char name_rx[IFNAMSIZ + 3]; /* -rx */
  762. char name_tx[IFNAMSIZ + 3]; /* -tx */
  763. char name_other[IFNAMSIZ + 6]; /* -other */
  764. };
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
        NV_OPTIMIZATION_MODE_THROUGHPUT,
        NV_OPTIMIZATION_MODE_CPU,
        NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
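/*
 * Worked example (editorial, not in the original source): for a 1 ms
 * timer period, time_in_micro_secs = 1000, so the register value is
 * (1000 * 100) / 2^10 = 100000 / 1024 ~= 97, which matches the
 * "NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms"
 * note in the register definitions above.
 */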
/*
 * MSI interrupts
 */
enum {
        NV_MSI_INT_DISABLED,
        NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
        NV_MSIX_INT_DISABLED,
        NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
        NV_DMA_64BIT_DISABLED,
        NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
        NV_CROSSOVER_DETECTION_DISABLED,
        NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down = 0;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
        return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
        return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
        /* force out pending posted writes */
        readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
        return le32_to_cpu(prd->flaglen)
                & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
        return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                return false;
        return true;
}
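/*
 * Editorial note: "optimized" here simply means DESC_VER_3, the only
 * descriptor version that uses the four-word struct ring_desc_ex with
 * split high/low 64-bit buffer addresses; DESC_VER_1 and DESC_VER_2
 * use the two-word struct ring_desc.
 */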
  856. static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
  857. int delay, int delaymax, const char *msg)
  858. {
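	/* poll the register at (base + offset) until the masked value
	 * equals target; give up and return 1 after delaymax usecs */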
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk("%s", msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
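	/* split into two shifts: a single ">> 32" would be undefined
	 * behavior when dma_addr_t is only 32 bits wide */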
	return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */
}
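
/*
 * The rx and tx descriptor rings live in one contiguous DMA allocation:
 * the rx ring starts at ring_addr and the tx ring follows immediately
 * after it (see free_rings below).
 */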
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
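
/*
 * Multiple irq vectors are only in play when MSI-X is enabled with more
 * than one vector allocated; MSI-X with a single vector is handled like
 * a single shared irq.
 */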
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
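		/* XOR semantics (see comment above): writing the
		 * currently-enabled mask back toggles those bits off */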
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)

/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
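		/* a previous mii transaction is still flagged in-use:
		 * writing the INUSE bit clears it, then let the bus settle */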
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
			dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
			dev->name, miireg, addr, retval);
	}

	return retval;
}

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211C) {
			u32 powerstate = readl(base + NvRegPowerState2);

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
			reg |= PHY_REALTEK_INIT9;
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
			if (!(reg & PHY_REALTEK_INIT11)) {
				reg |= PHY_REALTEK_INIT11;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down) {
		mii_control |= BMCR_PDOWN;
	}
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;
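
	/* fill only up to the slot before get_rx, so a completely full
	 * ring is never confused with an empty one */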
	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

/* If rx bufs are exhausted called after 50ms to attempt to refresh */
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
	np->tx_stop = 0;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}

static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	if (tx_skb->dma) {
		if (tx_skb->dma_single)
			pci_unmap_single(np->pci_dev, tx_skb->dma,
					 tx_skb->dma_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(np->pci_dev, tx_skb->dma,
				       tx_skb->dma_len,
				       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
}

static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	nv_unmap_txskb(np, tx_skb);
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(np, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
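	/* (put - get) mod ring_size is the number of slots in flight,
	 * biased by tx_ring_size so the pointer difference cannot go
	 * negative; the remainder of the ring is free */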
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}

static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already gained np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}

/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};

static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* We are going to pull three random 12-bit mini-seeds
	   and swizzle bits around to increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		(miniseed2 & 0x0F0) |
		((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		(miniseed3 & 0x0F0) |
		((miniseed3 & 0x00F) << 8);
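
	/* combinedSeed layout: bits 23..12 = miniseed1 ^ miniseed2_reversed,
	 * bits 11..0 = miniseed2 ^ miniseed3_reversed */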
	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds cannot be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb_headlen(skb);
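	/* entries = ceil(size / NV_TX2_TSO_MAX_SIZE): descriptors needed
	 * for the linear part of the skb; fragments are added below */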
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc *put_tx;
	struct ring_desc *start_tx;
	struct ring_desc *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
				 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
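	/* debug: hex dump the first 64 bytes of the packet */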
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char *)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb_headlen(skb);
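	/* as above: ceil(size / NV_TX2_TSO_MAX_SIZE) descriptors for the
	 * linear part, plus the fragment descriptors counted below */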
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex *put_tx;
	struct ring_desc_ex *start_tx;
	struct ring_desc_ex *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	struct nv_skb_map *start_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
				 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also to the next skb_map element.
		 */
		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	/* debug: hex dump the first 64 bytes of the packet */
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char *)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
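
/*
 * nv_tx_flip_ownership: hand one deferred packet over to the hardware.
 * Packets queued while NV_TX_LIMIT_COUNT were already outstanding had
 * their VALID bit withheld in nv_start_xmit_optimized; as completions
 * arrive, set VALID on the next deferred descriptor and kick the
 * transmitter.
 */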
static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static int nv_tx_done(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc *orig_get_tx = np->get_tx.orig;
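
	/* walk completed descriptors: stop at the first one the NIC still
	 * owns (NV_TX_VALID set) or once the work limit is reached */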
  2152. while ((np->get_tx.orig != np->put_tx.orig) &&
  2153. !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
  2154. (tx_work < limit)) {
  2155. dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
  2156. dev->name, flags);
  2157. nv_unmap_txskb(np, np->get_tx_ctx);
  2158. if (np->desc_ver == DESC_VER_1) {
  2159. if (flags & NV_TX_LASTPACKET) {
  2160. if (flags & NV_TX_ERROR) {
  2161. if (flags & NV_TX_UNDERFLOW)
  2162. dev->stats.tx_fifo_errors++;
  2163. if (flags & NV_TX_CARRIERLOST)
  2164. dev->stats.tx_carrier_errors++;
  2165. if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
  2166. nv_legacybackoff_reseed(dev);
  2167. dev->stats.tx_errors++;
  2168. } else {
  2169. dev->stats.tx_packets++;
  2170. dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
  2171. }
  2172. dev_kfree_skb_any(np->get_tx_ctx->skb);
  2173. np->get_tx_ctx->skb = NULL;
  2174. tx_work++;
  2175. }
  2176. } else {
  2177. if (flags & NV_TX2_LASTPACKET) {
  2178. if (flags & NV_TX2_ERROR) {
  2179. if (flags & NV_TX2_UNDERFLOW)
  2180. dev->stats.tx_fifo_errors++;
  2181. if (flags & NV_TX2_CARRIERLOST)
  2182. dev->stats.tx_carrier_errors++;
  2183. if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
  2184. nv_legacybackoff_reseed(dev);
  2185. dev->stats.tx_errors++;
  2186. } else {
  2187. dev->stats.tx_packets++;
  2188. dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
  2189. }
  2190. dev_kfree_skb_any(np->get_tx_ctx->skb);
  2191. np->get_tx_ctx->skb = NULL;
  2192. tx_work++;
  2193. }
  2194. }
  2195. if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
  2196. np->get_tx.orig = np->first_tx.orig;
  2197. if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
  2198. np->get_tx_ctx = np->first_tx_ctx;
  2199. }
  2200. if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
  2201. np->tx_stop = 0;
  2202. netif_wake_queue(dev);
  2203. }
  2204. return tx_work;
  2205. }
static int nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit) {
				nv_tx_flip_ownership(dev);
			}
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;
	union ring_type put_tx;
	int saved_tx_limit;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i = 0; i <= np->register_size; i += 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       i,
			       readl(base + i + 0), readl(base + i + 4),
			       readl(base + i + 8), readl(base + i + 12),
			       readl(base + i + 16), readl(base + i + 20),
			       readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i = 0; i < np->tx_ring_size; i += 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
	saved_tx_limit = np->tx_limit;
	np->tx_limit = 0; /* prevent giving HW any limited pkts */
	np->tx_stop = 0;  /* prevent waking tx queue */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* save current HW position */
	if (np->tx_change_owner)
		put_tx.ex = np->tx_change_owner->first_tx_desc;
	else
		put_tx = np->put_tx;

	/* 3) clear all tx state */
	nv_drain_tx(dev);
	nv_init_tx(dev);

	/* 4) restore state to current HW position */
	np->get_tx = np->put_tx = put_tx;
	np->tx_limit = saved_tx_limit;

	/* 5) restart tx engine */
	nv_start_tx(dev);
	netif_wake_queue(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}
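/*
 * nv_rx_process: walk the rx ring and hand completed packets to the
 * network core. Descriptors are validated per descriptor version
 * (DESC_VER_1 vs. DESC_VER_2), soft errors (framing) are fixed up in
 * place, and hard errors (CRC, overflow, missed frame) are counted
 * before the skb is dropped. Valid packets go up via napi_gro_receive().
 */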
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	       !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j % 16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char *)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /* ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
			dev->name, len, skb->protocol);
		napi_gro_receive(&np->napi, skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
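/*
 * nv_rx_process_optimized: rx path for the extended descriptor format.
 * Same structure as nv_rx_process, except that only V2-style flags are
 * checked and a hardware VLAN tag, if present in the descriptor's
 * buflow word, is passed to the stack via vlan_gro_receive().
 */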
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	       !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j % 16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char *)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /* ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
				napi_gro_receive(&np->napi, skb);
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
					vlan_gro_receive(&np->napi, np->vlangrp,
							 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
				} else {
					napi_gro_receive(&np->napi, skb);
				}
			}

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
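/*
 * set_bufsize: derive the rx buffer size from the current MTU, keeping
 * NV_RX_HEADERS of slack for link-layer headers.
 */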
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
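/*
 * nv_copy_mac_to_hw: load dev->dev_addr into the hardware; the six
 * address bytes are packed little-endian into the MacAddrA/B registers.
 */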
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
		 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}

/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct netdev_hw_addr *ha;

				netdev_for_each_mc_addr(ha, dev) {
					unsigned char *addr = ha->addr;
					u32 a, b;

					a = le32_to_cpu(*(__le32 *) addr);
					b = le16_to_cpu(*(__le16 *) (&addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
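/*
 * nv_update_pause: program the rx pause filter bit and the tx pause
 * frame registers according to pause_flags, honoring what the hardware
 * reports as RX/TX pause capable in np->pause_flags.
 */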
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
				/* limit the number of tx pause frames to a default of 8 */
				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
			}
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
			dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
			dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
		dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
	       base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
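/*
 * nv_linkchange: propagate the result of nv_update_linkspeed() to the
 * carrier state; on transition it also toggles nv_txrx_gate() and
 * starts or stops the rx engine accordingly.
 */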
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_txrx_gate(dev, false);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_txrx_gate(dev, true);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}

static void nv_msi_workaround(struct fe_priv *np)
{
	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}
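/*
 * nv_change_interrupt_mode: in dynamic optimization mode, switch
 * between the CPU (poll based) and throughput (per packet) interrupt
 * masks based on recent work load. Returns 1 if np->irqmask changed
 * and the caller must reprogram the hardware mask.
 */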
static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{
	struct fe_priv *np = netdev_priv(dev);

	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
		if (total_work > NV_DYNAMIC_THRESHOLD) {
			/* transition to poll based interrupts */
			np->quiet_count = 0;
			if (np->irqmask != NVREG_IRQMASK_CPU) {
				np->irqmask = NVREG_IRQMASK_CPU;
				return 1;
			}
		} else {
			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
				np->quiet_count++;
			} else {
				/* reached a period of low activity, switch
				   to per tx/rx packet interrupts */
				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
					return 1;
				}
			}
		}
	}
	return 0;
}
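/*
 * nv_nic_irq: top-half interrupt handler. Acknowledges the pending
 * events, applies the MSI workaround, masks further interrupts, and
 * defers all actual work to NAPI.
 */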
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

	if (napi_schedule_prep(&np->napi)) {
		/*
		 * Disable further irqs (msix not enabled with napi)
		 */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}

	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_HANDLED;
}

/**
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

	if (napi_schedule_prep(&np->napi)) {
		/*
		 * Disable further irqs (msix not enabled with napi)
		 */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_HANDLED;
}
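/*
 * nv_nic_irq_tx: MSI-X handler for the dedicated tx vector. Loops,
 * completing tx work until the events clear; after max_interrupt_work
 * iterations it masks the tx interrupts and falls back to the
 * nic_poll timer.
 */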
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
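/*
 * nv_napi_poll: NAPI poll callback. Completes tx work, processes rx
 * packets up to the budget, refills the rx ring, and handles link and
 * recoverable-error events signalled alongside the data interrupts.
 * Interrupts are re-enabled only when less than the full budget was
 * consumed.
 */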
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int retcode;
	int rx_count, tx_work = 0, rx_work = 0;

	do {
		if (!nv_optimized(np)) {
			spin_lock_irqsave(&np->lock, flags);
			tx_work += nv_tx_done(dev, np->tx_ring_size);
			spin_unlock_irqrestore(&np->lock, flags);

			rx_count = nv_rx_process(dev, budget);
			retcode = nv_alloc_rx(dev);
		} else {
			spin_lock_irqsave(&np->lock, flags);
			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
			spin_unlock_irqrestore(&np->lock, flags);

			rx_count = nv_rx_process_optimized(dev, budget);
			retcode = nv_alloc_rx_optimized(dev);
		}
	} while (retcode == 0 &&
		 rx_count > 0 && (rx_work += rx_count) < budget);

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	nv_change_interrupt_mode(dev, tx_work + rx_work);

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock_irqsave(&np->lock, flags);
		nv_link_irq(dev);
		spin_unlock_irqrestore(&np->lock, flags);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock_irqsave(&np->lock, flags);
		nv_linkchange(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock_irqrestore(&np->lock, flags);
		napi_complete(napi);
		return rx_work;
	}

	if (rx_work < budget) {
		/* re-enable interrupts
		   (msix not enabled in napi) */
		napi_complete(napi);

		writel(np->irqmask, base + NvRegIrqMask);
	}
	return rx_work;
}
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
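/*
 * nv_nic_irq_test: minimal handler used by the interrupt self-test;
 * it only acknowledges the timer event and records in np->intr_test
 * that an interrupt arrived.
 */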
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
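/*
 * set_msix_vector_map: route the interrupt sources named in irqmask to
 * the given MSI-X vector. Each source occupies a 4-bit field; sources
 * 0-7 live in NvRegMSIXMap0, sources 8-15 in NvRegMSIXMap1.
 */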
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
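/*
 * nv_request_irq: acquire the device's interrupt(s), trying MSI-X
 * first (with separate rx/tx/other vectors in throughput mode), then
 * MSI, then the plain legacy INTx line. Returns 0 on success, 1 on
 * failure.
 */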
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				sprintf(np->name_rx, "%s-rx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
						nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				sprintf(np->name_tx, "%s-tx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
						nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				sprintf(np->name_other, "%s-other", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
						nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
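/*
 * nv_do_nic_poll: nic_poll timer callback. Runs the interrupt
 * handler(s) with the corresponding irq line(s) disabled and, if a
 * recoverable error was flagged, fully resets and reinitializes the
 * rx/tx engines before unmasking interrupts again.
 */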
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then reenable interrupts on the nic;
	 * we have to do this before calling nv_nic_irq because that may
	 * decide to do otherwise
	 */
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			if (np->driver_data & DEV_HAS_POWER_CNTRL)
				nv_mac_reset(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* clear interrupts */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

			/* restart rx engine */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		np->nic_poll_irq = 0;
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));
}

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;
		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: forcing speed 1000 with autonegotiation disabled is
		 * intentionally forbidden - no one should need that. */
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		/* FIXME:
		 * this can take some time, and interrupts are disabled
		 * due to spin_lock_irqsave, but let's hope no daemon
		 * is going to change the settings very often...
		 * Worst case:
		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which adds up to approximately
		 * a second.
		 */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;
		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}
		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}
	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
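
/* ethtool register dump: nv_get_regs_len() reports the size of the mapped
 * register window and nv_get_regs() fills the caller's buffer with a raw
 * readl() sweep over that window.
 */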
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* '<', not '<=': the buffer is register_size bytes, so one more
	 * iteration would write a word past the end of it */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
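
/* ethtool nway_reset: restart autonegotiation; only valid while autoneg
 * is enabled. The engines are stopped around the PHY poke when running.
 */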
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}
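
/* ethtool ring sizing: maximum ring depth depends on the descriptor
 * version; mini and jumbo rx rings are not supported by this hardware.
 */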
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
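
/* nv_set_ringparam: allocates the new descriptor ring and skb maps first,
 * so that on allocation failure the old rings stay in place; only then are
 * the engines stopped, the old rings freed and the hardware repointed.
 */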
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
			&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
			&ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		/* kfree() is a no-op on NULL pointers */
		kfree(rx_skbuff);
		kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;
	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
	np->ring_addr = ring_addr;
	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);
		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
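
/* ethtool pause frame hooks: rx pause can always be requested, tx pause
 * only on hardware flagged NV_PAUSEFRAME_TX_CAPABLE; with autoneg the
 * request is negotiated via the PHY, otherwise it is applied directly.
 */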
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}

static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}
	return retcode;
}

static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V3)
			return NV_DEV_STATISTICS_V3_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}
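
/* Self-test helpers: nv_link_test() samples BMSR twice because the link
 * status bit is latched; nv_register_test() toggles maskable bits in a
 * table of registers and verifies they read back.
 */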
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}

static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);
		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);
		new_read = readl(base + nv_registers_test[i].reg);
		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;
		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);
	} while (nv_registers_test[++i].reg != 0);
	return 1;
}
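
/* nv_interrupt_test: temporarily rewires the device to a single vector
 * with the timer interrupt enabled and checks that the ISR sets
 * np->intr_test within 100ms. Returns 1 on pass, 2 if no interrupt was
 * seen, and 0 if the irq could not be (re)requested.
 */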
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base + NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
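
/* nv_loopback_test: puts the MAC into loopback, transmits one pattern
 * packet and checks that it comes back on the first rx descriptor with
 * the right length and payload. Returns 1 on pass, 0 on failure.
 */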
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
		       " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	/* the buffer is read by the device, so map it to-device to match
	 * the pci_unmap_single(..., PCI_DMA_TODEVICE) below */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_TODEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (!nv_optimized(np)) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR)
			ret = 0;
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_single(np->pci_dev, test_dma_addr,
			 (skb_end_pointer(tx_skb) - tx_skb->data),
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
out:
	/* stop engines */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rxtx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
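
/* ethtool self-test entry point: the link test always runs; register,
 * interrupt and loopback tests only run offline, with the device fully
 * quiesced first and reinitialized afterwards.
 */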
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;

	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			nv_napi_disable(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rxtx(dev);
			netif_start_queue(dev);
			nv_napi_enable(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}

static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}

static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};

static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}

/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
			np->mgmt_sema = 1;
			return 1;
		} else {
			udelay(50);
		}
	}

	return 0;
}

static void nv_mgmt_release_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl;

	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
		if (np->mgmt_sema) {
			tx_ctrl = readl(base + NvRegTransmitterControl);
			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
			writel(tx_ctrl, base + NvRegTransmitterControl);
		}
	}
}
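
/* nv_mgmt_get_version: asks the management unit for its version by
 * toggling the data-start bit and waiting (up to 5 seconds) for the
 * data-ready bit to flip. Returns 1 on success, 0 on timeout or error.
 */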
static int nv_mgmt_get_version(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 data_ready = readl(base + NvRegTransmitterControl);
	u32 data_ready2 = 0;
	unsigned long start;
	int ready = 0;

	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
	start = jiffies;
	while (time_before(jiffies, start + 5*HZ)) {
		data_ready2 = readl(base + NvRegTransmitterControl);
		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
			ready = 1;
			break;
		}
		schedule_timeout_uninterruptible(1);
	}

	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
		return 0;

	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;

	return 1;
}
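
/* nv_open: bring the device up - power up the phy, clear any previous
 * configuration, program rings, slot time, poll interval and wake-up
 * flags, request the irq and kick off one manual link-speed update.
 */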
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;
	u32 low;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* power up phy */
	mii_rw(dev, np->phyaddr, MII_BMCR,
	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);

	nv_txrx_gate(dev, false);
	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		  NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
		  KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else {
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	}
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
	       base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0))
		goto out_drain;

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}
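
/* nv_close: tear the device down - stop timers, queues and engines,
 * disable interrupts, then either keep the receiver alive for WoL or
 * power down the phy and gate the clocks.
 */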
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled || !phy_power_down) {
		nv_txrx_gate(dev, false);
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
		nv_txrx_gate(dev, true);
	}

	/* FIXME: power down nic */

	return 0;
}

static const struct net_device_ops nv_netdev_ops = {
	.ndo_open = nv_open,
	.ndo_stop = nv_close,
	.ndo_get_stats = nv_get_stats,
	.ndo_start_xmit = nv_start_xmit,
	.ndo_tx_timeout = nv_tx_timeout,
	.ndo_change_mtu = nv_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = nv_set_mac_address,
	.ndo_set_multicast_list = nv_set_multicast,
	.ndo_vlan_rx_register = nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = nv_poll_controller,
#endif
};

static const struct net_device_ops nv_netdev_ops_optimized = {
	.ndo_open = nv_open,
	.ndo_stop = nv_close,
	.ndo_get_stats = nv_get_stats,
	.ndo_start_xmit = nv_start_xmit_optimized,
	.ndo_tx_timeout = nv_tx_timeout,
	.ndo_change_mtu = nv_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = nv_set_mac_address,
	.ndo_set_multicast_list = nv_set_multicast,
	.ndo_vlan_rx_register = nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = nv_poll_controller,
#endif
};
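
/* nv_probe: PCI probe routine - maps the register window, selects the
 * descriptor version and feature flags from the PCI id table, reads and
 * (if necessary) un-reverses the MAC address, finds the phy and finally
 * registers the netdev.
 */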
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
			pci_name(pci_dev), i, (void *)pci_resource_start(pci_dev, i),
			pci_resource_len(pci_dev, i),
			pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* copy of device id */
	np->device_id = id->device;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
				dev_printk(KERN_INFO, &pci_dev->dev,
					   "64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
				dev_printk(KERN_INFO, &pci_dev->dev,
					   "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
		dev->features |= NETIF_F_GRO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	if (!nv_optimized(np))
		dev->netdev_ops = &nv_netdev_ops;
	else
		dev->netdev_ops = &nv_netdev_ops_optimized;

	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
	SET_ETHTOOL_OPS(dev, &ops);
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
		printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_printk(KERN_ERR, &pci_dev->dev,
			   "Invalid MAC address detected: %pM\n",
			   dev->dev_addr);
		dev_printk(KERN_ERR, &pci_dev->dev,
			   "Please complain to your hardware vendor. Switching to a random MAC.\n");
		random_ether_addr(dev->dev_addr);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
		pci_name(pci_dev), dev->dev_addr);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* Workaround current PCI init glitch: wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	device_init_wakeup(&pci_dev->dev, 1);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1)
		np->tx_flags = NV_TX_VALID;
	else
		np->tx_flags = NV_TX2_VALID;

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi)
		np->msi_flags |= NV_MSI_CAPABLE;
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* msi-x has had reported issues when modifying the irqmask,
		 * as in the case of napi; therefore, disable it for now
		 */
#if 0
		np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
	}

	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
		/* start off in throughput mode */
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		/* remove support for msix mode */
		np->msi_flags &= ~NV_MSI_X_CAPABLE;
	} else {
		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0)
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
				pci_name(pci_dev), np->mac_in_use);
			/* has the management unit already set up the phy? */
			if (np->mac_in_use &&
			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
				/* phy is inited by mgmt unit */
				phyinitialized = 1;
				dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
					pci_name(pci_dev));
			} else {
				/* we need to init the phy */
			}
		}
	}
  5093. /* find a suitable phy */
  5094. for (i = 1; i <= 32; i++) {
  5095. int id1, id2;
  5096. int phyaddr = i & 0x1F;
  5097. spin_lock_irq(&np->lock);
  5098. id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
  5099. spin_unlock_irq(&np->lock);
  5100. if (id1 < 0 || id1 == 0xffff)
  5101. continue;
  5102. spin_lock_irq(&np->lock);
  5103. id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
  5104. spin_unlock_irq(&np->lock);
  5105. if (id2 < 0 || id2 == 0xffff)
  5106. continue;
  5107. np->phy_model = id2 & PHYID2_MODEL_MASK;
  5108. id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
  5109. id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
  5110. dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
  5111. pci_name(pci_dev), id1, id2, phyaddr);
  5112. np->phyaddr = phyaddr;
  5113. np->phy_oui = id1 | id2;
  5114. /* Realtek hardcoded phy id1 to all zero's on certain phys */
  5115. if (np->phy_oui == PHY_OUI_REALTEK2)
  5116. np->phy_oui = PHY_OUI_REALTEK;
  5117. /* Setup phy revision for Realtek */
  5118. if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
  5119. np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
  5120. break;
  5121. }
  5122. if (i == 33) {
  5123. dev_printk(KERN_INFO, &pci_dev->dev,
  5124. "open: Could not find a valid PHY.\n");
  5125. goto out_error;
  5126. }
  5127. if (!phyinitialized) {
  5128. /* reset it */
  5129. phy_init(dev);
  5130. } else {
  5131. /* see if it is a gigabit phy */
  5132. u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
  5133. if (mii_status & PHY_GIGABIT) {
  5134. np->gigabit = PHY_GIGABIT;
  5135. }
  5136. }
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0],
		   dev->dev_addr[1],
		   dev->dev_addr[2],
		   dev->dev_addr[3],
		   dev->dev_addr[4],
		   dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
			"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;
out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
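
/*
 * The out_* labels above unwind nv_probe() in reverse order of
 * acquisition; jumping to a later label therefore skips the cleanup of
 * resources that were never obtained.
 */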
static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	nv_restore_mac_addr(pci_dev);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
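
/*
 * nv_remove() mirrors the probe path: first undo MAC, phy and
 * management-unit state so a later probe (or a kexec'd kernel) sees
 * sane hardware, then release rings, MMIO and PCI resources in reverse
 * order of allocation.
 */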
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		/* gross, but we have no lighter-weight path: fully close */
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
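
/*
 * pci_save_state() only covers PCI config space; the loop above
 * additionally snapshots the chip's MMIO register window into
 * np->saved_config_space so nv_resume() can replay it, since that
 * state is presumably lost in low power states.
 */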
static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base+i*sizeof(u32));

	if (np->driver_data & DEV_NEED_MSI_FIX)
		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	/* restore phy state, including autoneg */
	phy_init(dev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}
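
/*
 * The DEV_NEED_MSI_FIX write above rewrites a vendor-private MSI
 * register (NV_MSI_PRIV_OFFSET) that the saved PCI state apparently
 * does not cover; without it MSI delivery can break after resume on
 * the affected chips.
 */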
static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	/*
	 * Restore the MAC so a kernel started by kexec won't get confused.
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF)
		nv_restore_mac_addr(pdev);

	pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * only put the device into D3 if we really go for poweroff.
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x01C3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0066),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00D6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0086),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x008C),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00E6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00DF),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0056),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0057),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0037),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0038),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0268),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0269),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0372),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0373),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};
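
/*
 * Each table entry's .driver_data is a feature/workaround bitmask that
 * nv_probe() tests to configure the device, e.g.:
 *
 *	if (id->driver_data & DEV_HAS_MGMT_UNIT)
 *		... probe the management unit ...
 *
 * so supporting a new device ID is usually just a new entry with the
 * right set of DEV_* flags.
 */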
static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};
static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
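
/*
 * Illustrative module load (parameter values are examples only):
 *
 *	modprobe forcedeth msi=1 optimization_mode=2 max_interrupt_work=15
 *
 * All parameters are registered with permissions 0, so they can only be
 * set at load time and are not exposed under
 * /sys/module/forcedeth/parameters.
 */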
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);