/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 * IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if an rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk printk
#else
#define dprintk(x...) do { } while (0)
#endif
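/*
 * dprintk is a compile-time debug switch: flip the "#if 0" above to
 * "#if 1" to route dprintk through printk. An illustrative call site
 * (the message text is hypothetical) would look like:
 *
 *	dprintk(KERN_DEBUG "%s: packet queued for tx\n", dev->name);
 */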
#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64

/*
 * Hardware access:
 */
#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
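/*
 * The DEV_* flags above travel in the PCI device table's driver_data and
 * are tested as a bitmask. A minimal sketch of such a test; the helper
 * below is illustrative only and not used elsewhere in the driver:
 */
static inline int nv_flag_set_example(u32 driver_data, u32 flag)
{
	/* e.g. nv_flag_set_example(np->driver_data, DEV_HAS_MSI) */
	return (driver_data & flag) != 0;
}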
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10
	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff
	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff
	NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12
	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008
	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,
	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};
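/*
 * All of the NvReg* values above are offsets into the memory-mapped
 * register window. Accesses follow the usual readl()/writel() pattern;
 * an illustrative (not driver-specific) sequence, using the get_hwbase()
 * and pci_push() helpers defined later in this file:
 *
 *	u8 __iomem *base = get_hwbase(dev);
 *	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
 *	pci_push(base);   flush the posted write
 */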
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
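/*
 * Note that the NV_RX2_CHECKSUM_* values are encodings of the field
 * selected by NV_RX2_CHECKSUMMASK, so they are compared for equality
 * rather than tested as individual bits. A hedged sketch of the decode:
 *
 *	u32 csum = flags & NV_RX2_CHECKSUMMASK;
 *	if (csum == NV_RX2_CHECKSUM_IP_TCP || csum == NV_RX2_CHECKSUM_IP_UDP)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */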
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3

/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003

#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100
#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080
#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
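/*
 * Worked out: struct nv_ethtool_stats holds 33 u64 fields, so
 * NV_DEV_STATISTICS_V3_COUNT = 33; V2 drops the three version 3
 * counters (tx_unicast/tx_multicast/tx_broadcast), giving 30; V1
 * additionally drops the six version 2 counters, giving 24.
 */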
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
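/*
 * For the locked paths, the usual pattern is an illustrative sketch like
 * the following (np and flags are assumed locals):
 *
 *	struct fe_priv *np = netdev_priv(dev);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&np->lock, flags);
 *	... touch hardware / shared state ...
 *	spin_unlock_irqrestore(&np->lock, flags);
 */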
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;

	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx */
	char name_tx[IFNAMSIZ + 3];	/* -tx */
	char name_other[IFNAMSIZ + 6];	/* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
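/*
 * Example of the conversion above: for a 1 ms interval,
 * poll_interval = (1000 us * 100) / 1024 = 97 (truncated), matching the
 * NVREG_POLL_DEFAULT=97 note earlier in this file.
 */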
  787. /*
  788. * MSI interrupts
  789. */
  790. enum {
  791. NV_MSI_INT_DISABLED,
  792. NV_MSI_INT_ENABLED
  793. };
  794. static int msi = NV_MSI_INT_ENABLED;
  795. /*
  796. * MSIX interrupts
  797. */
  798. enum {
  799. NV_MSIX_INT_DISABLED,
  800. NV_MSIX_INT_ENABLED
  801. };
  802. static int msix = NV_MSIX_INT_ENABLED;
  803. /*
  804. * DMA 64bit
  805. */
  806. enum {
  807. NV_DMA_64BIT_DISABLED,
  808. NV_DMA_64BIT_ENABLED
  809. };
  810. static int dma_64bit = NV_DMA_64BIT_ENABLED;
  811. /*
  812. * Crossover Detection
  813. * Realtek 8201 phy + some OEM boards do not work properly.
  814. */
  815. enum {
  816. NV_CROSSOVER_DETECTION_DISABLED,
  817. NV_CROSSOVER_DETECTION_ENABLED
  818. };
  819. static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
  820. /*
  821. * Power down phy when interface is down (persists through reboot;
  822. * older Linux and other OSes may not power it up again)
  823. */
  824. static int phy_power_down = 0;
  825. static inline struct fe_priv *get_nvpriv(struct net_device *dev)
  826. {
  827. return netdev_priv(dev);
  828. }
  829. static inline u8 __iomem *get_hwbase(struct net_device *dev)
  830. {
  831. return ((struct fe_priv *)netdev_priv(dev))->base;
  832. }
  833. static inline void pci_push(u8 __iomem *base)
  834. {
  835. /* force out pending posted writes */
  836. readl(base);
  837. }
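/*
 * Editor's note: PCI memory writes are posted, so a writel() may still
 * sit in a bridge buffer when the CPU moves on. A read from the same
 * device forces those writes to complete first, which is all the dummy
 * readl() above does. Typical usage pattern in this driver:
 *
 *	writel(val, base + SomeRegister);
 *	pci_push(base);	-- val is now guaranteed to have reached the nic
 *
 * (SomeRegister is a placeholder, not a real register name.)
 */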
  838. static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
  839. {
  840. return le32_to_cpu(prd->flaglen)
  841. & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
  842. }
  843. static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
  844. {
  845. return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
  846. }
  847. static bool nv_optimized(struct fe_priv *np)
  848. {
  849. if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  850. return false;
  851. return true;
  852. }
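/*
 * Editor's note: "optimized" refers to the descriptor format, not to a
 * code path being faster per se. DESC_VER_1/2 use the two-word legacy
 * ring_desc; newer versions use the four-word ring_desc_ex with 64-bit
 * buffer addresses and vlan support. The driver forks on this test
 * throughout.
 */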
  853. static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
  854. int delay, int delaymax, const char *msg)
  855. {
  856. u8 __iomem *base = get_hwbase(dev);
  857. pci_push(base);
  858. do {
  859. udelay(delay);
  860. delaymax -= delay;
  861. if (delaymax < 0) {
  862. if (msg)
  863. printk("%s", msg);
  864. return 1;
  865. }
  866. } while ((readl(base + offset) & mask) != target);
  867. return 0;
  868. }
  869. #define NV_SETUP_RX_RING 0x01
  870. #define NV_SETUP_TX_RING 0x02
  871. static inline u32 dma_low(dma_addr_t addr)
  872. {
  873. return addr;
  874. }
  875. static inline u32 dma_high(dma_addr_t addr)
  876. {
877. return addr >> 31 >> 1; /* 0 if 32bit, shift down by 32 if 64bit */
  878. }
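/*
 * Editor's note: the double shift sidesteps undefined behaviour. When
 * dma_addr_t is a 32-bit type, "addr >> 32" would shift by the full
 * width of the type (undefined in C); "addr >> 31 >> 1" is well defined,
 * yields 0 there, and returns the high 32 bits when dma_addr_t is
 * 64-bit.
 */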
  879. static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
  880. {
  881. struct fe_priv *np = get_nvpriv(dev);
  882. u8 __iomem *base = get_hwbase(dev);
  883. if (!nv_optimized(np)) {
  884. if (rxtx_flags & NV_SETUP_RX_RING) {
  885. writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
  886. }
  887. if (rxtx_flags & NV_SETUP_TX_RING) {
  888. writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
  889. }
  890. } else {
  891. if (rxtx_flags & NV_SETUP_RX_RING) {
  892. writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
  893. writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
  894. }
  895. if (rxtx_flags & NV_SETUP_TX_RING) {
  896. writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
  897. writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
  898. }
  899. }
  900. }
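/*
 * Editor's sketch of the ring layout assumed here and in free_rings():
 * both rings live in one DMA allocation, rx first, so the tx ring starts
 * rx_ring_size descriptors into the block:
 *
 *	ring_addr -> | rx desc 0 ... rx desc rx_ring_size-1 |
 *	             | tx desc 0 ... tx desc tx_ring_size-1 |
 */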
  901. static void free_rings(struct net_device *dev)
  902. {
  903. struct fe_priv *np = get_nvpriv(dev);
  904. if (!nv_optimized(np)) {
  905. if (np->rx_ring.orig)
  906. pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
  907. np->rx_ring.orig, np->ring_addr);
  908. } else {
  909. if (np->rx_ring.ex)
  910. pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
  911. np->rx_ring.ex, np->ring_addr);
  912. }
  913. if (np->rx_skb)
  914. kfree(np->rx_skb);
  915. if (np->tx_skb)
  916. kfree(np->tx_skb);
  917. }
  918. static int using_multi_irqs(struct net_device *dev)
  919. {
  920. struct fe_priv *np = get_nvpriv(dev);
  921. if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
  922. ((np->msi_flags & NV_MSI_X_ENABLED) &&
  923. ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
  924. return 0;
  925. else
  926. return 1;
  927. }
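/*
 * Editor's note: this reads as "multiple irqs are only in use when MSI-X
 * is enabled with more than one vector". With a single MSI-X vector, or
 * with MSI/legacy interrupts, every event funnels through one handler.
 */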
  928. static void nv_txrx_gate(struct net_device *dev, bool gate)
  929. {
  930. struct fe_priv *np = get_nvpriv(dev);
  931. u8 __iomem *base = get_hwbase(dev);
  932. u32 powerstate;
  933. if (!np->mac_in_use &&
  934. (np->driver_data & DEV_HAS_POWER_CNTRL)) {
  935. powerstate = readl(base + NvRegPowerState2);
  936. if (gate)
  937. powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
  938. else
  939. powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
  940. writel(powerstate, base + NvRegPowerState2);
  941. }
  942. }
  943. static void nv_enable_irq(struct net_device *dev)
  944. {
  945. struct fe_priv *np = get_nvpriv(dev);
  946. if (!using_multi_irqs(dev)) {
  947. if (np->msi_flags & NV_MSI_X_ENABLED)
  948. enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  949. else
  950. enable_irq(np->pci_dev->irq);
  951. } else {
  952. enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  953. enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
  954. enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
  955. }
  956. }
  957. static void nv_disable_irq(struct net_device *dev)
  958. {
  959. struct fe_priv *np = get_nvpriv(dev);
  960. if (!using_multi_irqs(dev)) {
  961. if (np->msi_flags & NV_MSI_X_ENABLED)
  962. disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  963. else
  964. disable_irq(np->pci_dev->irq);
  965. } else {
  966. disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  967. disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
  968. disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
  969. }
  970. }
  971. /* In MSIX mode, a write to irqmask behaves as XOR */
  972. static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
  973. {
  974. u8 __iomem *base = get_hwbase(dev);
  975. writel(mask, base + NvRegIrqMask);
  976. }
  977. static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
  978. {
  979. struct fe_priv *np = get_nvpriv(dev);
  980. u8 __iomem *base = get_hwbase(dev);
  981. if (np->msi_flags & NV_MSI_X_ENABLED) {
  982. writel(mask, base + NvRegIrqMask);
  983. } else {
  984. if (np->msi_flags & NV_MSI_ENABLED)
  985. writel(0, base + NvRegMSIIrqMask);
  986. writel(0, base + NvRegIrqMask);
  987. }
  988. }
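/*
 * Editor's note: enable and disable are asymmetric on purpose. In MSI-X
 * mode the mask write toggles bits (see the XOR remark above), so
 * writing the same mask again turns the interrupts back off; in MSI and
 * legacy modes the bits are plain, so disabling writes 0 instead.
 */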
  989. static void nv_napi_enable(struct net_device *dev)
  990. {
  991. #ifdef CONFIG_FORCEDETH_NAPI
  992. struct fe_priv *np = get_nvpriv(dev);
  993. napi_enable(&np->napi);
  994. #endif
  995. }
  996. static void nv_napi_disable(struct net_device *dev)
  997. {
  998. #ifdef CONFIG_FORCEDETH_NAPI
  999. struct fe_priv *np = get_nvpriv(dev);
  1000. napi_disable(&np->napi);
  1001. #endif
  1002. }
  1003. #define MII_READ (-1)
  1004. /* mii_rw: read/write a register on the PHY.
  1005. *
  1006. * Caller must guarantee serialization
  1007. */
  1008. static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
  1009. {
  1010. u8 __iomem *base = get_hwbase(dev);
  1011. u32 reg;
  1012. int retval;
  1013. writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
  1014. reg = readl(base + NvRegMIIControl);
  1015. if (reg & NVREG_MIICTL_INUSE) {
  1016. writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
  1017. udelay(NV_MIIBUSY_DELAY);
  1018. }
  1019. reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
  1020. if (value != MII_READ) {
  1021. writel(value, base + NvRegMIIData);
  1022. reg |= NVREG_MIICTL_WRITE;
  1023. }
  1024. writel(reg, base + NvRegMIIControl);
  1025. if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
  1026. NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
  1027. dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
  1028. dev->name, miireg, addr);
  1029. retval = -1;
  1030. } else if (value != MII_READ) {
  1031. /* it was a write operation - fewer failures are detectable */
  1032. dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
  1033. dev->name, value, miireg, addr);
  1034. retval = 0;
  1035. } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
  1036. dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
  1037. dev->name, miireg, addr);
  1038. retval = -1;
  1039. } else {
  1040. retval = readl(base + NvRegMIIData);
  1041. dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
  1042. dev->name, miireg, addr, retval);
  1043. }
  1044. return retval;
  1045. }
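/*
 * Editor's sketch of the calling convention (register names are the
 * standard linux/mii.h ones already used below):
 *
 *	val = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *		-- read: returns the register value, or -1 on failure
 *	if (mii_rw(dev, np->phyaddr, MII_BMCR, val))
 *		-- write: returns 0 on success, so nonzero means failure
 */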
  1046. static int phy_reset(struct net_device *dev, u32 bmcr_setup)
  1047. {
  1048. struct fe_priv *np = netdev_priv(dev);
  1049. u32 miicontrol;
  1050. unsigned int tries = 0;
  1051. miicontrol = BMCR_RESET | bmcr_setup;
  1052. if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
  1053. return -1;
  1054. }
  1055. /* wait for 500ms */
  1056. msleep(500);
  1057. /* must wait till reset is deasserted */
  1058. while (miicontrol & BMCR_RESET) {
  1059. msleep(10);
  1060. miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1061. /* FIXME: 100 tries seem excessive */
  1062. if (tries++ > 100)
  1063. return -1;
  1064. }
  1065. return 0;
  1066. }
  1067. static int phy_init(struct net_device *dev)
  1068. {
  1069. struct fe_priv *np = get_nvpriv(dev);
  1070. u8 __iomem *base = get_hwbase(dev);
  1071. u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
  1072. /* phy errata for E3016 phy */
  1073. if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
  1074. reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
  1075. reg &= ~PHY_MARVELL_E3016_INITMASK;
  1076. if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
  1077. printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
  1078. return PHY_ERROR;
  1079. }
  1080. }
  1081. if (np->phy_oui == PHY_OUI_REALTEK) {
  1082. if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
  1083. np->phy_rev == PHY_REV_REALTEK_8211B) {
  1084. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1085. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1086. return PHY_ERROR;
  1087. }
  1088. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
  1089. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1090. return PHY_ERROR;
  1091. }
  1092. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
  1093. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1094. return PHY_ERROR;
  1095. }
  1096. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
  1097. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1098. return PHY_ERROR;
  1099. }
  1100. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
  1101. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1102. return PHY_ERROR;
  1103. }
  1104. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
  1105. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1106. return PHY_ERROR;
  1107. }
  1108. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1109. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1110. return PHY_ERROR;
  1111. }
  1112. }
  1113. if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
  1114. np->phy_rev == PHY_REV_REALTEK_8211C) {
  1115. u32 powerstate = readl(base + NvRegPowerState2);
  1116. /* need to perform hw phy reset */
  1117. powerstate |= NVREG_POWERSTATE2_PHY_RESET;
  1118. writel(powerstate, base + NvRegPowerState2);
  1119. msleep(25);
  1120. powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
  1121. writel(powerstate, base + NvRegPowerState2);
  1122. msleep(25);
  1123. reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
  1124. reg |= PHY_REALTEK_INIT9;
  1125. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
  1126. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1127. return PHY_ERROR;
  1128. }
  1129. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
  1130. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1131. return PHY_ERROR;
  1132. }
  1133. reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
  1134. if (!(reg & PHY_REALTEK_INIT11)) {
  1135. reg |= PHY_REALTEK_INIT11;
  1136. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
  1137. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1138. return PHY_ERROR;
  1139. }
  1140. }
  1141. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1142. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1143. return PHY_ERROR;
  1144. }
  1145. }
  1146. if (np->phy_model == PHY_MODEL_REALTEK_8201) {
  1147. if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
  1148. phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
  1149. phy_reserved |= PHY_REALTEK_INIT7;
  1150. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
  1151. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1152. return PHY_ERROR;
  1153. }
  1154. }
  1155. }
  1156. }
  1157. /* set advertise register */
  1158. reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
  1159. reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
  1160. if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
  1161. printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
  1162. return PHY_ERROR;
  1163. }
  1164. /* get phy interface type */
  1165. phyinterface = readl(base + NvRegPhyInterface);
  1166. /* see if gigabit phy */
  1167. mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
  1168. if (mii_status & PHY_GIGABIT) {
  1169. np->gigabit = PHY_GIGABIT;
  1170. mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
  1171. mii_control_1000 &= ~ADVERTISE_1000HALF;
  1172. if (phyinterface & PHY_RGMII)
  1173. mii_control_1000 |= ADVERTISE_1000FULL;
  1174. else
  1175. mii_control_1000 &= ~ADVERTISE_1000FULL;
  1176. if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
  1177. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1178. return PHY_ERROR;
  1179. }
  1180. }
  1181. else
  1182. np->gigabit = 0;
  1183. mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1184. mii_control |= BMCR_ANENABLE;
  1185. if (np->phy_oui == PHY_OUI_REALTEK &&
  1186. np->phy_model == PHY_MODEL_REALTEK_8211 &&
  1187. np->phy_rev == PHY_REV_REALTEK_8211C) {
  1188. /* start autoneg since we already performed hw reset above */
  1189. mii_control |= BMCR_ANRESTART;
  1190. if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
  1191. printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
  1192. return PHY_ERROR;
  1193. }
  1194. } else {
  1195. /* reset the phy
  1196. * (certain phys need bmcr to be setup with reset)
  1197. */
  1198. if (phy_reset(dev, mii_control)) {
  1199. printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
  1200. return PHY_ERROR;
  1201. }
  1202. }
  1203. /* phy vendor specific configuration */
  1204. if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
  1205. phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
  1206. phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
  1207. phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
  1208. if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
  1209. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1210. return PHY_ERROR;
  1211. }
  1212. phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
  1213. phy_reserved |= PHY_CICADA_INIT5;
  1214. if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
  1215. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1216. return PHY_ERROR;
  1217. }
  1218. }
  1219. if (np->phy_oui == PHY_OUI_CICADA) {
  1220. phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
  1221. phy_reserved |= PHY_CICADA_INIT6;
  1222. if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
  1223. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1224. return PHY_ERROR;
  1225. }
  1226. }
  1227. if (np->phy_oui == PHY_OUI_VITESSE) {
  1228. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
  1229. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1230. return PHY_ERROR;
  1231. }
  1232. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
  1233. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1234. return PHY_ERROR;
  1235. }
  1236. phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
  1237. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
  1238. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1239. return PHY_ERROR;
  1240. }
  1241. phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
  1242. phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
  1243. phy_reserved |= PHY_VITESSE_INIT3;
  1244. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
  1245. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1246. return PHY_ERROR;
  1247. }
  1248. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
  1249. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1250. return PHY_ERROR;
  1251. }
  1252. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
  1253. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1254. return PHY_ERROR;
  1255. }
  1256. phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
  1257. phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
  1258. phy_reserved |= PHY_VITESSE_INIT3;
  1259. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
  1260. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1261. return PHY_ERROR;
  1262. }
  1263. phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
  1264. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
  1265. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1266. return PHY_ERROR;
  1267. }
  1268. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
  1269. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1270. return PHY_ERROR;
  1271. }
  1272. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
  1273. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1274. return PHY_ERROR;
  1275. }
  1276. phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
  1277. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
  1278. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1279. return PHY_ERROR;
  1280. }
  1281. phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
  1282. phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
  1283. phy_reserved |= PHY_VITESSE_INIT8;
  1284. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
  1285. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1286. return PHY_ERROR;
  1287. }
  1288. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
  1289. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1290. return PHY_ERROR;
  1291. }
  1292. if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
  1293. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1294. return PHY_ERROR;
  1295. }
  1296. }
  1297. if (np->phy_oui == PHY_OUI_REALTEK) {
  1298. if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
  1299. np->phy_rev == PHY_REV_REALTEK_8211B) {
  1300. /* reset could have cleared these out, set them back */
  1301. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1302. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1303. return PHY_ERROR;
  1304. }
  1305. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
  1306. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1307. return PHY_ERROR;
  1308. }
  1309. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
  1310. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1311. return PHY_ERROR;
  1312. }
  1313. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
  1314. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1315. return PHY_ERROR;
  1316. }
  1317. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
  1318. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1319. return PHY_ERROR;
  1320. }
  1321. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
  1322. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1323. return PHY_ERROR;
  1324. }
  1325. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1326. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1327. return PHY_ERROR;
  1328. }
  1329. }
  1330. if (np->phy_model == PHY_MODEL_REALTEK_8201) {
  1331. if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
  1332. phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
  1333. phy_reserved |= PHY_REALTEK_INIT7;
  1334. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
  1335. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1336. return PHY_ERROR;
  1337. }
  1338. }
  1339. if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
  1340. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
  1341. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1342. return PHY_ERROR;
  1343. }
  1344. phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
  1345. phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
  1346. phy_reserved |= PHY_REALTEK_INIT3;
  1347. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
  1348. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1349. return PHY_ERROR;
  1350. }
  1351. if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1352. printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1353. return PHY_ERROR;
  1354. }
  1355. }
  1356. }
  1357. }
1358. /* some phys clear out pause advertisement on reset, set it back */
  1359. mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1360. /* restart auto negotiation, and optionally power down the phy */
  1361. mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1362. mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
  1363. if (phy_power_down) {
  1364. mii_control |= BMCR_PDOWN;
  1365. }
  1366. if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
  1367. return PHY_ERROR;
  1368. }
  1369. return 0;
  1370. }
  1371. static void nv_start_rx(struct net_device *dev)
  1372. {
  1373. struct fe_priv *np = netdev_priv(dev);
  1374. u8 __iomem *base = get_hwbase(dev);
  1375. u32 rx_ctrl = readl(base + NvRegReceiverControl);
  1376. dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
  1377. /* Already running? Stop it. */
  1378. if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
  1379. rx_ctrl &= ~NVREG_RCVCTL_START;
  1380. writel(rx_ctrl, base + NvRegReceiverControl);
  1381. pci_push(base);
  1382. }
  1383. writel(np->linkspeed, base + NvRegLinkSpeed);
  1384. pci_push(base);
  1385. rx_ctrl |= NVREG_RCVCTL_START;
  1386. if (np->mac_in_use)
  1387. rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
  1388. writel(rx_ctrl, base + NvRegReceiverControl);
  1389. dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
  1390. dev->name, np->duplex, np->linkspeed);
  1391. pci_push(base);
  1392. }
  1393. static void nv_stop_rx(struct net_device *dev)
  1394. {
  1395. struct fe_priv *np = netdev_priv(dev);
  1396. u8 __iomem *base = get_hwbase(dev);
  1397. u32 rx_ctrl = readl(base + NvRegReceiverControl);
  1398. dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
  1399. if (!np->mac_in_use)
  1400. rx_ctrl &= ~NVREG_RCVCTL_START;
  1401. else
  1402. rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
  1403. writel(rx_ctrl, base + NvRegReceiverControl);
  1404. reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
  1405. NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
  1406. KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
  1407. udelay(NV_RXSTOP_DELAY2);
  1408. if (!np->mac_in_use)
  1409. writel(0, base + NvRegLinkSpeed);
  1410. }
  1411. static void nv_start_tx(struct net_device *dev)
  1412. {
  1413. struct fe_priv *np = netdev_priv(dev);
  1414. u8 __iomem *base = get_hwbase(dev);
  1415. u32 tx_ctrl = readl(base + NvRegTransmitterControl);
  1416. dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
  1417. tx_ctrl |= NVREG_XMITCTL_START;
  1418. if (np->mac_in_use)
  1419. tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
  1420. writel(tx_ctrl, base + NvRegTransmitterControl);
  1421. pci_push(base);
  1422. }
  1423. static void nv_stop_tx(struct net_device *dev)
  1424. {
  1425. struct fe_priv *np = netdev_priv(dev);
  1426. u8 __iomem *base = get_hwbase(dev);
  1427. u32 tx_ctrl = readl(base + NvRegTransmitterControl);
  1428. dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
  1429. if (!np->mac_in_use)
  1430. tx_ctrl &= ~NVREG_XMITCTL_START;
  1431. else
  1432. tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
  1433. writel(tx_ctrl, base + NvRegTransmitterControl);
  1434. reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
  1435. NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
  1436. KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
  1437. udelay(NV_TXSTOP_DELAY2);
  1438. if (!np->mac_in_use)
  1439. writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
  1440. base + NvRegTransmitPoll);
  1441. }
  1442. static void nv_start_rxtx(struct net_device *dev)
  1443. {
  1444. nv_start_rx(dev);
  1445. nv_start_tx(dev);
  1446. }
  1447. static void nv_stop_rxtx(struct net_device *dev)
  1448. {
  1449. nv_stop_rx(dev);
  1450. nv_stop_tx(dev);
  1451. }
  1452. static void nv_txrx_reset(struct net_device *dev)
  1453. {
  1454. struct fe_priv *np = netdev_priv(dev);
  1455. u8 __iomem *base = get_hwbase(dev);
  1456. dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
  1457. writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
  1458. pci_push(base);
  1459. udelay(NV_TXRX_RESET_DELAY);
  1460. writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
  1461. pci_push(base);
  1462. }
  1463. static void nv_mac_reset(struct net_device *dev)
  1464. {
  1465. struct fe_priv *np = netdev_priv(dev);
  1466. u8 __iomem *base = get_hwbase(dev);
  1467. u32 temp1, temp2, temp3;
  1468. dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
  1469. writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
  1470. pci_push(base);
  1471. /* save registers since they will be cleared on reset */
  1472. temp1 = readl(base + NvRegMacAddrA);
  1473. temp2 = readl(base + NvRegMacAddrB);
  1474. temp3 = readl(base + NvRegTransmitPoll);
  1475. writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
  1476. pci_push(base);
  1477. udelay(NV_MAC_RESET_DELAY);
  1478. writel(0, base + NvRegMacReset);
  1479. pci_push(base);
  1480. udelay(NV_MAC_RESET_DELAY);
  1481. /* restore saved registers */
  1482. writel(temp1, base + NvRegMacAddrA);
  1483. writel(temp2, base + NvRegMacAddrB);
  1484. writel(temp3, base + NvRegTransmitPoll);
  1485. writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
  1486. pci_push(base);
  1487. }
  1488. static void nv_get_hw_stats(struct net_device *dev)
  1489. {
  1490. struct fe_priv *np = netdev_priv(dev);
  1491. u8 __iomem *base = get_hwbase(dev);
  1492. np->estats.tx_bytes += readl(base + NvRegTxCnt);
  1493. np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
  1494. np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
  1495. np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
  1496. np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
  1497. np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
  1498. np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
  1499. np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
  1500. np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
  1501. np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
  1502. np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
  1503. np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
  1504. np->estats.rx_runt += readl(base + NvRegRxRunt);
  1505. np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
  1506. np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
  1507. np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
  1508. np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
  1509. np->estats.rx_length_error += readl(base + NvRegRxLenErr);
  1510. np->estats.rx_unicast += readl(base + NvRegRxUnicast);
  1511. np->estats.rx_multicast += readl(base + NvRegRxMulticast);
  1512. np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
  1513. np->estats.rx_packets =
  1514. np->estats.rx_unicast +
  1515. np->estats.rx_multicast +
  1516. np->estats.rx_broadcast;
  1517. np->estats.rx_errors_total =
  1518. np->estats.rx_crc_errors +
  1519. np->estats.rx_over_errors +
  1520. np->estats.rx_frame_error +
  1521. (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
  1522. np->estats.rx_late_collision +
  1523. np->estats.rx_runt +
  1524. np->estats.rx_frame_too_long;
  1525. np->estats.tx_errors_total =
  1526. np->estats.tx_late_collision +
  1527. np->estats.tx_fifo_errors +
  1528. np->estats.tx_carrier_errors +
  1529. np->estats.tx_excess_deferral +
  1530. np->estats.tx_retry_error;
  1531. if (np->driver_data & DEV_HAS_STATISTICS_V2) {
  1532. np->estats.tx_deferral += readl(base + NvRegTxDef);
  1533. np->estats.tx_packets += readl(base + NvRegTxFrame);
  1534. np->estats.rx_bytes += readl(base + NvRegRxCnt);
  1535. np->estats.tx_pause += readl(base + NvRegTxPause);
  1536. np->estats.rx_pause += readl(base + NvRegRxPause);
  1537. np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
  1538. }
  1539. if (np->driver_data & DEV_HAS_STATISTICS_V3) {
  1540. np->estats.tx_unicast += readl(base + NvRegTxUnicast);
  1541. np->estats.tx_multicast += readl(base + NvRegTxMulticast);
  1542. np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
  1543. }
  1544. }
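/*
 * Editor's note: the "+=" accumulation above suggests the hardware
 * counters are clear-on-read: each pass folds the delta since the last
 * read into the software totals. That only works if this function is
 * the sole reader of these registers.
 */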
  1545. /*
  1546. * nv_get_stats: dev->get_stats function
  1547. * Get latest stats value from the nic.
  1548. * Called with read_lock(&dev_base_lock) held for read -
  1549. * only synchronized against unregister_netdevice.
  1550. */
  1551. static struct net_device_stats *nv_get_stats(struct net_device *dev)
  1552. {
  1553. struct fe_priv *np = netdev_priv(dev);
  1554. /* If the nic supports hw counters then retrieve latest values */
  1555. if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
  1556. nv_get_hw_stats(dev);
  1557. /* copy to net_device stats */
  1558. dev->stats.tx_bytes = np->estats.tx_bytes;
  1559. dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
  1560. dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
  1561. dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
  1562. dev->stats.rx_over_errors = np->estats.rx_over_errors;
  1563. dev->stats.rx_errors = np->estats.rx_errors_total;
  1564. dev->stats.tx_errors = np->estats.tx_errors_total;
  1565. }
  1566. return &dev->stats;
  1567. }
  1568. /*
  1569. * nv_alloc_rx: fill rx ring entries.
1570. * Returns 1 if an skb allocation failed and the
1571. * rx engine is left without available descriptors.
  1572. */
  1573. static int nv_alloc_rx(struct net_device *dev)
  1574. {
  1575. struct fe_priv *np = netdev_priv(dev);
  1576. struct ring_desc* less_rx;
  1577. less_rx = np->get_rx.orig;
  1578. if (less_rx-- == np->first_rx.orig)
  1579. less_rx = np->last_rx.orig;
  1580. while (np->put_rx.orig != less_rx) {
  1581. struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
  1582. if (skb) {
  1583. np->put_rx_ctx->skb = skb;
  1584. np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
  1585. skb->data,
  1586. skb_tailroom(skb),
  1587. PCI_DMA_FROMDEVICE);
  1588. np->put_rx_ctx->dma_len = skb_tailroom(skb);
  1589. np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
  1590. wmb();
  1591. np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
  1592. if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
  1593. np->put_rx.orig = np->first_rx.orig;
  1594. if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
  1595. np->put_rx_ctx = np->first_rx_ctx;
  1596. } else {
  1597. return 1;
  1598. }
  1599. }
  1600. return 0;
  1601. }
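/*
 * Editor's note: less_rx is the slot just before get_rx (the next
 * descriptor the driver will reap), so the loop deliberately stops one
 * entry short. Keeping that gap prevents put_rx from wrapping onto
 * get_rx, which would make a completely full ring indistinguishable
 * from an empty one.
 */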
  1602. static int nv_alloc_rx_optimized(struct net_device *dev)
  1603. {
  1604. struct fe_priv *np = netdev_priv(dev);
  1605. struct ring_desc_ex* less_rx;
  1606. less_rx = np->get_rx.ex;
  1607. if (less_rx-- == np->first_rx.ex)
  1608. less_rx = np->last_rx.ex;
  1609. while (np->put_rx.ex != less_rx) {
  1610. struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
  1611. if (skb) {
  1612. np->put_rx_ctx->skb = skb;
  1613. np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
  1614. skb->data,
  1615. skb_tailroom(skb),
  1616. PCI_DMA_FROMDEVICE);
  1617. np->put_rx_ctx->dma_len = skb_tailroom(skb);
  1618. np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
  1619. np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
  1620. wmb();
  1621. np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
  1622. if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
  1623. np->put_rx.ex = np->first_rx.ex;
  1624. if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
  1625. np->put_rx_ctx = np->first_rx_ctx;
  1626. } else {
  1627. return 1;
  1628. }
  1629. }
  1630. return 0;
  1631. }
1632. /* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
  1633. #ifdef CONFIG_FORCEDETH_NAPI
  1634. static void nv_do_rx_refill(unsigned long data)
  1635. {
  1636. struct net_device *dev = (struct net_device *) data;
  1637. struct fe_priv *np = netdev_priv(dev);
  1638. /* Just reschedule NAPI rx processing */
  1639. napi_schedule(&np->napi);
  1640. }
  1641. #else
  1642. static void nv_do_rx_refill(unsigned long data)
  1643. {
  1644. struct net_device *dev = (struct net_device *) data;
  1645. struct fe_priv *np = netdev_priv(dev);
  1646. int retcode;
  1647. if (!using_multi_irqs(dev)) {
  1648. if (np->msi_flags & NV_MSI_X_ENABLED)
  1649. disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1650. else
  1651. disable_irq(np->pci_dev->irq);
  1652. } else {
  1653. disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1654. }
  1655. if (!nv_optimized(np))
  1656. retcode = nv_alloc_rx(dev);
  1657. else
  1658. retcode = nv_alloc_rx_optimized(dev);
  1659. if (retcode) {
  1660. spin_lock_irq(&np->lock);
  1661. if (!np->in_shutdown)
  1662. mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
  1663. spin_unlock_irq(&np->lock);
  1664. }
  1665. if (!using_multi_irqs(dev)) {
  1666. if (np->msi_flags & NV_MSI_X_ENABLED)
  1667. enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1668. else
  1669. enable_irq(np->pci_dev->irq);
  1670. } else {
  1671. enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1672. }
  1673. }
  1674. #endif
  1675. static void nv_init_rx(struct net_device *dev)
  1676. {
  1677. struct fe_priv *np = netdev_priv(dev);
  1678. int i;
  1679. np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
  1680. if (!nv_optimized(np))
  1681. np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
  1682. else
  1683. np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
  1684. np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
  1685. np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
  1686. for (i = 0; i < np->rx_ring_size; i++) {
  1687. if (!nv_optimized(np)) {
  1688. np->rx_ring.orig[i].flaglen = 0;
  1689. np->rx_ring.orig[i].buf = 0;
  1690. } else {
  1691. np->rx_ring.ex[i].flaglen = 0;
  1692. np->rx_ring.ex[i].txvlan = 0;
  1693. np->rx_ring.ex[i].bufhigh = 0;
  1694. np->rx_ring.ex[i].buflow = 0;
  1695. }
  1696. np->rx_skb[i].skb = NULL;
  1697. np->rx_skb[i].dma = 0;
  1698. }
  1699. }
  1700. static void nv_init_tx(struct net_device *dev)
  1701. {
  1702. struct fe_priv *np = netdev_priv(dev);
  1703. int i;
  1704. np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
  1705. if (!nv_optimized(np))
  1706. np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
  1707. else
  1708. np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
  1709. np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
  1710. np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
  1711. np->tx_pkts_in_progress = 0;
  1712. np->tx_change_owner = NULL;
  1713. np->tx_end_flip = NULL;
  1714. np->tx_stop = 0;
  1715. for (i = 0; i < np->tx_ring_size; i++) {
  1716. if (!nv_optimized(np)) {
  1717. np->tx_ring.orig[i].flaglen = 0;
  1718. np->tx_ring.orig[i].buf = 0;
  1719. } else {
  1720. np->tx_ring.ex[i].flaglen = 0;
  1721. np->tx_ring.ex[i].txvlan = 0;
  1722. np->tx_ring.ex[i].bufhigh = 0;
  1723. np->tx_ring.ex[i].buflow = 0;
  1724. }
  1725. np->tx_skb[i].skb = NULL;
  1726. np->tx_skb[i].dma = 0;
  1727. np->tx_skb[i].dma_len = 0;
  1728. np->tx_skb[i].first_tx_desc = NULL;
  1729. np->tx_skb[i].next_tx_ctx = NULL;
  1730. }
  1731. }
  1732. static int nv_init_ring(struct net_device *dev)
  1733. {
  1734. struct fe_priv *np = netdev_priv(dev);
  1735. nv_init_tx(dev);
  1736. nv_init_rx(dev);
  1737. if (!nv_optimized(np))
  1738. return nv_alloc_rx(dev);
  1739. else
  1740. return nv_alloc_rx_optimized(dev);
  1741. }
  1742. static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
  1743. {
  1744. struct fe_priv *np = netdev_priv(dev);
  1745. if (tx_skb->dma) {
  1746. pci_unmap_page(np->pci_dev, tx_skb->dma,
  1747. tx_skb->dma_len,
  1748. PCI_DMA_TODEVICE);
  1749. tx_skb->dma = 0;
  1750. }
  1751. if (tx_skb->skb) {
  1752. dev_kfree_skb_any(tx_skb->skb);
  1753. tx_skb->skb = NULL;
  1754. return 1;
  1755. } else {
  1756. return 0;
  1757. }
  1758. }
  1759. static void nv_drain_tx(struct net_device *dev)
  1760. {
  1761. struct fe_priv *np = netdev_priv(dev);
  1762. unsigned int i;
  1763. for (i = 0; i < np->tx_ring_size; i++) {
  1764. if (!nv_optimized(np)) {
  1765. np->tx_ring.orig[i].flaglen = 0;
  1766. np->tx_ring.orig[i].buf = 0;
  1767. } else {
  1768. np->tx_ring.ex[i].flaglen = 0;
  1769. np->tx_ring.ex[i].txvlan = 0;
  1770. np->tx_ring.ex[i].bufhigh = 0;
  1771. np->tx_ring.ex[i].buflow = 0;
  1772. }
  1773. if (nv_release_txskb(dev, &np->tx_skb[i]))
  1774. dev->stats.tx_dropped++;
  1775. np->tx_skb[i].dma = 0;
  1776. np->tx_skb[i].dma_len = 0;
  1777. np->tx_skb[i].first_tx_desc = NULL;
  1778. np->tx_skb[i].next_tx_ctx = NULL;
  1779. }
  1780. np->tx_pkts_in_progress = 0;
  1781. np->tx_change_owner = NULL;
  1782. np->tx_end_flip = NULL;
  1783. }
  1784. static void nv_drain_rx(struct net_device *dev)
  1785. {
  1786. struct fe_priv *np = netdev_priv(dev);
  1787. int i;
  1788. for (i = 0; i < np->rx_ring_size; i++) {
  1789. if (!nv_optimized(np)) {
  1790. np->rx_ring.orig[i].flaglen = 0;
  1791. np->rx_ring.orig[i].buf = 0;
  1792. } else {
  1793. np->rx_ring.ex[i].flaglen = 0;
  1794. np->rx_ring.ex[i].txvlan = 0;
  1795. np->rx_ring.ex[i].bufhigh = 0;
  1796. np->rx_ring.ex[i].buflow = 0;
  1797. }
  1798. wmb();
  1799. if (np->rx_skb[i].skb) {
  1800. pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
  1801. (skb_end_pointer(np->rx_skb[i].skb) -
  1802. np->rx_skb[i].skb->data),
  1803. PCI_DMA_FROMDEVICE);
  1804. dev_kfree_skb(np->rx_skb[i].skb);
  1805. np->rx_skb[i].skb = NULL;
  1806. }
  1807. }
  1808. }
  1809. static void nv_drain_rxtx(struct net_device *dev)
  1810. {
  1811. nv_drain_tx(dev);
  1812. nv_drain_rx(dev);
  1813. }
  1814. static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
  1815. {
  1816. return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
  1817. }
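/*
 * Editor's note (worked example): this is plain ring arithmetic on the
 * ctx pointers. With tx_ring_size = 256 and put_tx_ctx 5 slots ahead of
 * get_tx_ctx: 256 - ((256 + 5) % 256) = 251 free slots; with the two
 * pointers equal: 256 - 0 = 256, i.e. a completely empty ring.
 */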
  1818. static void nv_legacybackoff_reseed(struct net_device *dev)
  1819. {
  1820. u8 __iomem *base = get_hwbase(dev);
  1821. u32 reg;
  1822. u32 low;
  1823. int tx_status = 0;
  1824. reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
  1825. get_random_bytes(&low, sizeof(low));
  1826. reg |= low & NVREG_SLOTTIME_MASK;
  1827. /* Need to stop tx before change takes effect.
1828. * Caller has already acquired np->lock.
  1829. */
  1830. tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
  1831. if (tx_status)
  1832. nv_stop_tx(dev);
  1833. nv_stop_rx(dev);
  1834. writel(reg, base + NvRegSlotTime);
  1835. if (tx_status)
  1836. nv_start_tx(dev);
  1837. nv_start_rx(dev);
  1838. }
  1839. /* Gear Backoff Seeds */
  1840. #define BACKOFF_SEEDSET_ROWS 8
  1841. #define BACKOFF_SEEDSET_LFSRS 15
  1842. /* Known Good seed sets */
  1843. static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
  1844. {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
  1845. {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
  1846. {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
  1847. {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
  1848. {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
  1849. {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
  1850. {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
  1851. {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
  1852. static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
  1853. {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
  1854. {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
  1855. {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
  1856. {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
  1857. {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
  1858. {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
  1859. {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
  1860. {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
  1861. static void nv_gear_backoff_reseed(struct net_device *dev)
  1862. {
  1863. u8 __iomem *base = get_hwbase(dev);
  1864. u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
  1865. u32 temp, seedset, combinedSeed;
  1866. int i;
  1867. /* Setup seed for free running LFSR */
1868. /* We gather three 12-bit random miniseeds
1869. and swizzle bits around to increase randomness */
  1870. get_random_bytes(&miniseed1, sizeof(miniseed1));
  1871. miniseed1 &= 0x0fff;
  1872. if (miniseed1 == 0)
  1873. miniseed1 = 0xabc;
  1874. get_random_bytes(&miniseed2, sizeof(miniseed2));
  1875. miniseed2 &= 0x0fff;
  1876. if (miniseed2 == 0)
  1877. miniseed2 = 0xabc;
  1878. miniseed2_reversed =
  1879. ((miniseed2 & 0xF00) >> 8) |
  1880. (miniseed2 & 0x0F0) |
  1881. ((miniseed2 & 0x00F) << 8);
  1882. get_random_bytes(&miniseed3, sizeof(miniseed3));
  1883. miniseed3 &= 0x0fff;
  1884. if (miniseed3 == 0)
  1885. miniseed3 = 0xabc;
  1886. miniseed3_reversed =
  1887. ((miniseed3 & 0xF00) >> 8) |
  1888. (miniseed3 & 0x0F0) |
  1889. ((miniseed3 & 0x00F) << 8);
  1890. combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
  1891. (miniseed2 ^ miniseed3_reversed);
1892. /* Seeds cannot be zero */
  1893. if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
  1894. combinedSeed |= 0x08;
  1895. if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
  1896. combinedSeed |= 0x8000;
  1897. /* No need to disable tx here */
  1898. temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
  1899. temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
  1900. temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
  1901. writel(temp,base + NvRegBackOffControl);
  1902. /* Setup seeds for all gear LFSRs. */
  1903. get_random_bytes(&seedset, sizeof(seedset));
  1904. seedset = seedset % BACKOFF_SEEDSET_ROWS;
1905. for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
  1907. temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
  1908. temp |= main_seedset[seedset][i-1] & 0x3ff;
  1909. temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
  1910. writel(temp, base + NvRegBackOffControl);
  1911. }
  1912. }
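/*
 * Editor's note: the free-running LFSR gets a 24-bit random seed (two
 * 12-bit halves, one per register field), and each of the 15 gear LFSRs
 * is then seeded from one randomly chosen row of the known-good tables
 * above -- main_seedset supplying the low field and gear_seedset the
 * gear field.
 */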
  1913. /*
  1914. * nv_start_xmit: dev->hard_start_xmit function
  1915. * Called with netif_tx_lock held.
  1916. */
  1917. static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1918. {
  1919. struct fe_priv *np = netdev_priv(dev);
  1920. u32 tx_flags = 0;
  1921. u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
  1922. unsigned int fragments = skb_shinfo(skb)->nr_frags;
  1923. unsigned int i;
  1924. u32 offset = 0;
  1925. u32 bcnt;
  1926. u32 size = skb->len-skb->data_len;
  1927. u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  1928. u32 empty_slots;
  1929. struct ring_desc* put_tx;
  1930. struct ring_desc* start_tx;
  1931. struct ring_desc* prev_tx;
  1932. struct nv_skb_map* prev_tx_ctx;
  1933. unsigned long flags;
  1934. /* add fragments to entries count */
  1935. for (i = 0; i < fragments; i++) {
  1936. entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
  1937. ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  1938. }
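/* Editor's note: each term above is a ceiling division; a buffer of s
 * bytes costs ceil(s / NV_TX2_TSO_MAX_SIZE) descriptors. Assuming the
 * usual 16 KB chunk size, a 40000 byte TSO payload needs 3 of them.
 */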
  1939. spin_lock_irqsave(&np->lock, flags);
  1940. empty_slots = nv_get_empty_tx_slots(np);
  1941. if (unlikely(empty_slots <= entries)) {
  1942. netif_stop_queue(dev);
  1943. np->tx_stop = 1;
  1944. spin_unlock_irqrestore(&np->lock, flags);
  1945. return NETDEV_TX_BUSY;
  1946. }
  1947. spin_unlock_irqrestore(&np->lock, flags);
  1948. start_tx = put_tx = np->put_tx.orig;
  1949. /* setup the header buffer */
  1950. do {
  1951. prev_tx = put_tx;
  1952. prev_tx_ctx = np->put_tx_ctx;
  1953. bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  1954. np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
  1955. PCI_DMA_TODEVICE);
  1956. np->put_tx_ctx->dma_len = bcnt;
  1957. put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
  1958. put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
  1959. tx_flags = np->tx_flags;
  1960. offset += bcnt;
  1961. size -= bcnt;
  1962. if (unlikely(put_tx++ == np->last_tx.orig))
  1963. put_tx = np->first_tx.orig;
  1964. if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
  1965. np->put_tx_ctx = np->first_tx_ctx;
  1966. } while (size);
  1967. /* setup the fragments */
  1968. for (i = 0; i < fragments; i++) {
  1969. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1970. u32 size = frag->size;
  1971. offset = 0;
  1972. do {
  1973. prev_tx = put_tx;
  1974. prev_tx_ctx = np->put_tx_ctx;
  1975. bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  1976. np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
  1977. PCI_DMA_TODEVICE);
  1978. np->put_tx_ctx->dma_len = bcnt;
  1979. put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
  1980. put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
  1981. offset += bcnt;
  1982. size -= bcnt;
  1983. if (unlikely(put_tx++ == np->last_tx.orig))
  1984. put_tx = np->first_tx.orig;
  1985. if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
  1986. np->put_tx_ctx = np->first_tx_ctx;
  1987. } while (size);
  1988. }
  1989. /* set last fragment flag */
  1990. prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
  1991. /* save skb in this slot's context area */
  1992. prev_tx_ctx->skb = skb;
  1993. if (skb_is_gso(skb))
  1994. tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  1995. else
  1996. tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
  1997. NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
  1998. spin_lock_irqsave(&np->lock, flags);
  1999. /* set tx flags */
  2000. start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
  2001. np->put_tx.orig = put_tx;
  2002. spin_unlock_irqrestore(&np->lock, flags);
  2003. dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
  2004. dev->name, entries, tx_flags_extra);
  2005. {
  2006. int j;
  2007. for (j=0; j<64; j++) {
  2008. if ((j%16) == 0)
  2009. dprintk("\n%03x:", j);
  2010. dprintk(" %02x", ((unsigned char*)skb->data)[j]);
  2011. }
  2012. dprintk("\n");
  2013. }
  2014. dev->trans_start = jiffies;
  2015. writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  2016. return NETDEV_TX_OK;
  2017. }
  2018. static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
  2019. {
  2020. struct fe_priv *np = netdev_priv(dev);
  2021. u32 tx_flags = 0;
  2022. u32 tx_flags_extra;
  2023. unsigned int fragments = skb_shinfo(skb)->nr_frags;
  2024. unsigned int i;
  2025. u32 offset = 0;
  2026. u32 bcnt;
  2027. u32 size = skb->len-skb->data_len;
  2028. u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  2029. u32 empty_slots;
  2030. struct ring_desc_ex* put_tx;
  2031. struct ring_desc_ex* start_tx;
  2032. struct ring_desc_ex* prev_tx;
  2033. struct nv_skb_map* prev_tx_ctx;
  2034. struct nv_skb_map* start_tx_ctx;
  2035. unsigned long flags;
  2036. /* add fragments to entries count */
  2037. for (i = 0; i < fragments; i++) {
  2038. entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
  2039. ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  2040. }
  2041. spin_lock_irqsave(&np->lock, flags);
  2042. empty_slots = nv_get_empty_tx_slots(np);
  2043. if (unlikely(empty_slots <= entries)) {
  2044. netif_stop_queue(dev);
  2045. np->tx_stop = 1;
  2046. spin_unlock_irqrestore(&np->lock, flags);
  2047. return NETDEV_TX_BUSY;
  2048. }
  2049. spin_unlock_irqrestore(&np->lock, flags);
  2050. start_tx = put_tx = np->put_tx.ex;
  2051. start_tx_ctx = np->put_tx_ctx;
  2052. /* setup the header buffer */
  2053. do {
  2054. prev_tx = put_tx;
  2055. prev_tx_ctx = np->put_tx_ctx;
  2056. bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  2057. np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
  2058. PCI_DMA_TODEVICE);
  2059. np->put_tx_ctx->dma_len = bcnt;
  2060. put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
  2061. put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
  2062. put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
  2063. tx_flags = NV_TX2_VALID;
  2064. offset += bcnt;
  2065. size -= bcnt;
  2066. if (unlikely(put_tx++ == np->last_tx.ex))
  2067. put_tx = np->first_tx.ex;
  2068. if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
  2069. np->put_tx_ctx = np->first_tx_ctx;
  2070. } while (size);
  2071. /* setup the fragments */
  2072. for (i = 0; i < fragments; i++) {
  2073. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2074. u32 size = frag->size;
  2075. offset = 0;
  2076. do {
  2077. prev_tx = put_tx;
  2078. prev_tx_ctx = np->put_tx_ctx;
  2079. bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  2080. np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
  2081. PCI_DMA_TODEVICE);
  2082. np->put_tx_ctx->dma_len = bcnt;
  2083. put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
  2084. put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
  2085. put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
  2086. offset += bcnt;
  2087. size -= bcnt;
  2088. if (unlikely(put_tx++ == np->last_tx.ex))
  2089. put_tx = np->first_tx.ex;
  2090. if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
  2091. np->put_tx_ctx = np->first_tx_ctx;
  2092. } while (size);
  2093. }
  2094. /* set last fragment flag */
  2095. prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
  2096. /* save skb in this slot's context area */
  2097. prev_tx_ctx->skb = skb;
  2098. if (skb_is_gso(skb))
  2099. tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  2100. else
  2101. tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
  2102. NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
  2103. /* vlan tag */
  2104. if (likely(!np->vlangrp)) {
  2105. start_tx->txvlan = 0;
  2106. } else {
  2107. if (vlan_tx_tag_present(skb))
  2108. start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
  2109. else
  2110. start_tx->txvlan = 0;
  2111. }
  2112. spin_lock_irqsave(&np->lock, flags);
  2113. if (np->tx_limit) {
  2114. /* Limit the number of outstanding tx. Setup all fragments, but
  2115. * do not set the VALID bit on the first descriptor. Save a pointer
2116. * to that descriptor and also to the next skb_map element.
  2117. */
  2118. if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
  2119. if (!np->tx_change_owner)
  2120. np->tx_change_owner = start_tx_ctx;
  2121. /* remove VALID bit */
  2122. tx_flags &= ~NV_TX2_VALID;
  2123. start_tx_ctx->first_tx_desc = start_tx;
  2124. start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
  2125. np->tx_end_flip = np->put_tx_ctx;
  2126. } else {
  2127. np->tx_pkts_in_progress++;
  2128. }
  2129. }
  2130. /* set tx flags */
  2131. start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
  2132. np->put_tx.ex = put_tx;
  2133. spin_unlock_irqrestore(&np->lock, flags);
  2134. dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
  2135. dev->name, entries, tx_flags_extra);
  2136. {
  2137. int j;
  2138. for (j=0; j<64; j++) {
  2139. if ((j%16) == 0)
  2140. dprintk("\n%03x:", j);
  2141. dprintk(" %02x", ((unsigned char*)skb->data)[j]);
  2142. }
  2143. dprintk("\n");
  2144. }
  2145. dev->trans_start = jiffies;
  2146. writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  2147. return NETDEV_TX_OK;
  2148. }
  2149. static inline void nv_tx_flip_ownership(struct net_device *dev)
  2150. {
  2151. struct fe_priv *np = netdev_priv(dev);
  2152. np->tx_pkts_in_progress--;
  2153. if (np->tx_change_owner) {
  2154. np->tx_change_owner->first_tx_desc->flaglen |=
  2155. cpu_to_le32(NV_TX2_VALID);
  2156. np->tx_pkts_in_progress++;
  2157. np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
  2158. if (np->tx_change_owner == np->tx_end_flip)
  2159. np->tx_change_owner = NULL;
  2160. writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  2161. }
  2162. }
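/*
 * Editor's note: this is the completion half of the tx_limit scheme set
 * up in nv_start_xmit_optimized(). Packets beyond NV_TX_LIMIT_COUNT are
 * queued with their first descriptor left invalid; as each finished
 * packet is reaped, ownership of one parked packet is flipped to the
 * nic and the transmitter is kicked again.
 */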
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static int nv_tx_done(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
					dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}
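/*
 * nv_tx_done_optimized() below is the counterpart of nv_tx_done() for the
 * extended (version 3) descriptor format: the error bookkeeping is reduced
 * to the retry-backoff reseed (gear mode where the hardware supports it),
 * and when tx_limit is active each completed packet releases one deferred
 * packet via nv_tx_flip_ownership().
 */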
static int nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
					dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit) {
				nv_tx_flip_ownership(dev);
			}
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;
	union ring_type put_tx;
	int saved_tx_limit;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
	saved_tx_limit = np->tx_limit;
	np->tx_limit = 0; /* prevent giving HW any limited pkts */
	np->tx_stop = 0;  /* prevent waking tx queue */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* save current HW position */
	if (np->tx_change_owner)
		put_tx.ex = np->tx_change_owner->first_tx_desc;
	else
		put_tx = np->put_tx;

	/* 3) clear all tx state */
	nv_drain_tx(dev);
	nv_init_tx(dev);

	/* 4) restore state to current HW position */
	np->get_tx = np->put_tx = put_tx;
	np->tx_limit = saved_tx_limit;

	/* 5) restart tx engine */
	nv_start_tx(dev);
	netif_wake_queue(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}
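/*
 * Worked example for nv_getlen() (frame values assumed for illustration):
 * a frame arrives with datalen 70 and an 802.3 length field of 46, so
 * protolen becomes 46 + ETH_HLEN (14) = 60 <= datalen and the frame is
 * trimmed to 60 bytes. Had the length field claimed 64 (protolen 78 >
 * datalen 70), the frame would be rejected as truncated (-1).
 */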
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.orig != np->put_rx.orig) &&
	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	      (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
					dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
					dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.ex != np->put_rx.ex) &&
	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	      (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
					dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
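/*
 * Example for set_bufsize(): with any MTU up to the standard 1500,
 * rx_buf_sz stays at ETH_DATA_LEN + NV_RX_HEADERS (NV_RX_HEADERS being the
 * fixed per-buffer slack defined earlier in this file); a jumbo MTU of e.g.
 * 9000 yields rx_buf_sz = 9000 + NV_RX_HEADERS.
 */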
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
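/*
 * nv_copy_mac_to_hw() below packs dev_addr into two little-endian
 * registers: bytes 0-3 go into NvRegMacAddrA, bytes 4-5 into the low half
 * of NvRegMacAddrB. For example (address chosen for illustration),
 * 00:0c:6e:12:34:56 gives MacAddrA = 0x126e0c00 and MacAddrB = 0x00005634.
 */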
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}

/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
					b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
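/*
 * Note on the filter computed above: alwaysOn accumulates the bits that are
 * set in every listed multicast address and alwaysOff the bits that are
 * clear in every one, so mask = alwaysOn | alwaysOff covers exactly the bit
 * positions on which all addresses agree and addr holds their common value.
 * Example (addresses chosen for illustration): with 01:00:5e:00:00:01 and
 * 01:00:5e:00:00:02 in the list, the two low bits of the last byte drop out
 * of the mask, so 01:00:5e:00:00:00 through :03 all pass the hardware
 * filter and finer filtering is left to the network stack.
 */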
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
				/* limit the number of tx pause frames to a default of 8 */
				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
			}
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
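/*
 * The pause setup at the end of nv_update_linkspeed() below follows the
 * usual IEEE 802.3 flow control resolution (Annex 28B): symmetric pause on
 * both sides enables rx (and, if requested, tx) pause; a local asym-only
 * advertisement enables tx pause only when the partner advertises both cap
 * and asym; and cap+asym locally also covers the partner's asym-only case
 * with rx pause. The switch statement there spells out exactly these cases.
 */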
/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
				dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
				dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
				dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
			dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
				{
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP)
				{
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
				{
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				}
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_txrx_gate(dev, false);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_txrx_gate(dev, true);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static void nv_msi_workaround(struct fe_priv *np)
{

	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}
static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{
	struct fe_priv *np = netdev_priv(dev);

	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
		if (total_work > NV_DYNAMIC_THRESHOLD) {
			/* transition to poll based interrupts */
			np->quiet_count = 0;
			if (np->irqmask != NVREG_IRQMASK_CPU) {
				np->irqmask = NVREG_IRQMASK_CPU;
				return 1;
			}
		} else {
			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
				np->quiet_count++;
			} else {
				/* reached a period of low activity, switch
				   to per tx/rx packet interrupts */
				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
					return 1;
				}
			}
		}
	}
	return 0;
}
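/*
 * Example of the dynamic mode above (thresholds defined earlier in this
 * file): one interrupt that completes more than NV_DYNAMIC_THRESHOLD
 * descriptors switches the mask to NVREG_IRQMASK_CPU, i.e. poll-based
 * operation; only after NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet
 * interrupts does the driver drop back to NVREG_IRQMASK_THROUGHPUT,
 * i.e. per-packet interrupts.
 */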
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
#ifndef CONFIG_FORCEDETH_NAPI
	int total_work = 0;
	int loop_count = 0;
#endif

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

#ifdef CONFIG_FORCEDETH_NAPI
	napi_schedule(&np->napi);

	/* Disable further irqs
	   (msix not enabled with napi) */
	writel(0, base + NvRegIrqMask);

#else
	do
	{
		int work = 0;
		if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}

		spin_lock(&np->lock);
		work += nv_tx_done(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

		if (!work)
			break;

		total_work += work;

		loop_count++;
	}
	while (loop_count < max_interrupt_work);

	if (nv_change_interrupt_mode(dev, total_work)) {
		/* setup new irq mask */
		writel(np->irqmask, base + NvRegIrqMask);
	}

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock(&np->lock);
		nv_link_irq(dev);
		spin_unlock(&np->lock);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock(&np->lock);
		nv_linkchange(dev);
		spin_unlock(&np->lock);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock(&np->lock);
		/* disable interrupts on the nic */
		if (!(np->msi_flags & NV_MSI_X_ENABLED))
			writel(0, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);
		pci_push(base);

		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock(&np->lock);
	}
#endif
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_HANDLED;
}
/**
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
#ifndef CONFIG_FORCEDETH_NAPI
	int total_work = 0;
	int loop_count = 0;
#endif

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

#ifdef CONFIG_FORCEDETH_NAPI
	napi_schedule(&np->napi);

	/* Disable further irqs
	   (msix not enabled with napi) */
	writel(0, base + NvRegIrqMask);

#else
	do
	{
		int work = 0;
		if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}

		spin_lock(&np->lock);
		work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

		if (!work)
			break;

		total_work += work;

		loop_count++;
	}
	while (loop_count < max_interrupt_work);

	if (nv_change_interrupt_mode(dev, total_work)) {
		/* setup new irq mask */
		writel(np->irqmask, base + NvRegIrqMask);
	}

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock(&np->lock);
		nv_link_irq(dev);
		spin_unlock(&np->lock);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock(&np->lock);
		nv_linkchange(dev);
		spin_unlock(&np->lock);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock(&np->lock);
		/* disable interrupts on the nic */
		if (!(np->msi_flags & NV_MSI_X_ENABLED))
			writel(0, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);
		pci_push(base);

		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock(&np->lock);
	}
#endif
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_HANDLED;
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int retcode;
	int tx_work, rx_work;

	if (!nv_optimized(np)) {
		spin_lock_irqsave(&np->lock, flags);
		tx_work = nv_tx_done(dev, np->tx_ring_size);
		spin_unlock_irqrestore(&np->lock, flags);

		rx_work = nv_rx_process(dev, budget);
		retcode = nv_alloc_rx(dev);
	} else {
		spin_lock_irqsave(&np->lock, flags);
		tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
		spin_unlock_irqrestore(&np->lock, flags);

		rx_work = nv_rx_process_optimized(dev, budget);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	nv_change_interrupt_mode(dev, tx_work + rx_work);

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock_irqsave(&np->lock, flags);
		nv_link_irq(dev);
		spin_unlock_irqrestore(&np->lock, flags);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock_irqsave(&np->lock, flags);
		nv_linkchange(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock_irqrestore(&np->lock, flags);
		napi_complete(napi);
		return rx_work;
	}

	if (rx_work < budget) {
		/* re-enable interrupts
		   (msix not enabled in napi) */
		napi_complete(napi);

		writel(np->irqmask, base + NvRegIrqMask);
	}
	return rx_work;
}
#endif
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
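/*
 * Worked example for set_msix_vector_map() (values chosen for
 * illustration): mapping vector 1 onto irqmask 0x0003 (interrupt bits 0
 * and 1) builds msixmap = (1 << 0) | (1 << 4) = 0x11 for NvRegMSIXMap0,
 * i.e. one 4-bit nibble per interrupt bit.
 */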
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				sprintf(np->name_rx, "%s-rx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
						&nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				sprintf(np->name_tx, "%s-tx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
						&nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				sprintf(np->name_other, "%s-other", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
						&nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
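/*
 * nv_request_irq() above tries MSI-X first (split rx/tx/other vectors in
 * throughput mode, otherwise one shared vector), then falls back to MSI,
 * and finally to the legacy INTx line. nv_free_irq() below releases
 * whichever variant was set up.
 */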
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s), then reenable interrupts on the nic;
	 * we have to do this before calling nv_nic_irq because that
	 * may decide to do otherwise
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			if (np->driver_data & DEV_HAS_POWER_CNTRL)
				nv_mac_reset(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* clear interrupts */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

			/* restart rx engine */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		np->nic_poll_irq = 0;
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));
}

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);

	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}

static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
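
/*
 * nv_set_settings: apply link settings handed down from ethtool. For
 * example (an illustrative invocation, not part of this driver):
 *   ethtool -s eth0 speed 100 duplex full autoneg off
 * lands here with ecmd->autoneg == AUTONEG_DISABLE.
 */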
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		/* FIXME:
		 * this can take some time, and interrupts are disabled
		 * due to spin_lock_irqsave, but let's hope no daemon
		 * is going to change the settings very often...
		 * Worst case:
		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which is up to a second approximately
		 */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}

#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}
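
/* Dump the register window under the lock (read back by e.g. ethtool -d). */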
static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* buf holds register_size bytes, i.e. register_size/4 words */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
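
/*
 * nv_set_ringparam: resize the rx/tx rings. The new rings are allocated
 * first so the old ones survive an allocation failure; only then are the
 * engines stopped, the old rings freed and the pointers swapped over.
 */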
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		kfree(rx_skbuff);
		kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}

static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}

static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->rx_csum != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}

static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V3)
			return NV_DEV_STATISTICS_V3_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}

static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}

static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);
	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
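
/*
 * nv_interrupt_test: temporarily switch to a single test vector, arm the
 * timer interrupt and check that the ISR has set np->intr_test.
 * Returns 1 on success, 2 if no interrupt arrived, 0 on setup failure.
 */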
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
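
/*
 * nv_loopback_test: put the MAC in loopback, send one pattern-filled
 * frame and verify that it comes back intact. Returns 1 on pass,
 * 0 on any error, length mismatch or missing packet.
 */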
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
			 " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	/* the buffer is only ever read by the device, hence TODEVICE */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_TODEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (!nv_optimized(np)) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_single(np->pci_dev, test_dma_addr,
			 (skb_end_pointer(tx_skb) - tx_skb->data),
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
out:
	/* stop engines */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rxtx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
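
/*
 * nv_self_test: ethtool self-test entry point. The link test always runs;
 * the register, interrupt and loopback tests only run offline (reached
 * via e.g. "ethtool -t eth0 offline", an illustrative invocation).
 */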
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;

	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			nv_napi_disable(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rxtx(dev);
			netif_start_queue(dev);
			nv_napi_enable(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}

static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}
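
/* ethtool entry points; wired up via SET_ETHTOOL_OPS() in nv_probe(). */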
static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};

static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}

/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
			np->mgmt_sema = 1;
			return 1;
		} else {
			udelay(50);
		}
	}

	return 0;
}

static void nv_mgmt_release_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl;

	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
		if (np->mgmt_sema) {
			tx_ctrl = readl(base + NvRegTransmitterControl);
			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
			writel(tx_ctrl, base + NvRegTransmitterControl);
		}
	}
}
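
/*
 * nv_mgmt_get_version: ask the management unit for its version by
 * toggling the DATA_START bit and polling (up to 5 seconds) for the
 * DATA_READY bit to flip. Returns 1 on success, 0 on timeout or error.
 */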
static int nv_mgmt_get_version(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 data_ready = readl(base + NvRegTransmitterControl);
	u32 data_ready2 = 0;
	unsigned long start;
	int ready = 0;

	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
	start = jiffies;
	while (time_before(jiffies, start + 5*HZ)) {
		data_ready2 = readl(base + NvRegTransmitterControl);
		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
			ready = 1;
			break;
		}
		schedule_timeout_uninterruptible(1);
	}

	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
		return 0;

	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;

	return 1;
}
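
/*
 * nv_open: bring the nic up: power up the phy, program rings and
 * registers, request irqs, force one link-speed update and start the
 * engines, queue and timers.
 */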
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;
	u32 low;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* power up phy */
	mii_rw(dev, np->phyaddr, MII_BMCR,
	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);

	nv_txrx_gate(dev, false);
	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		  NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
		  KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else {
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	}
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
	       base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}
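
/*
 * nv_close: tear down in reverse: stop napi, timers and the queue, halt
 * the engines, free the irqs and drain the rings. If WoL is enabled the
 * receiver is restarted so that wakeup frames can still be seen.
 */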
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled || !phy_power_down) {
		nv_txrx_gate(dev, false);
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
		nv_txrx_gate(dev, true);
	}

	/* FIXME: power down nic */

	return 0;
}

static const struct net_device_ops nv_netdev_ops = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};

static const struct net_device_ops nv_netdev_ops_optimized = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit_optimized,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};
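
/*
 * nv_probe: PCI probe routine. Maps the register window, picks the
 * descriptor format from the device flags, reads (and, if necessary,
 * un-reverses) the MAC address, and sets up the net_device.
 */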
  4940. static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
  4941. {
  4942. struct net_device *dev;
  4943. struct fe_priv *np;
  4944. unsigned long addr;
  4945. u8 __iomem *base;
  4946. int err, i;
  4947. u32 powerstate, txreg;
  4948. u32 phystate_orig = 0, phystate;
  4949. int phyinitialized = 0;
  4950. static int printed_version;
  4951. if (!printed_version++)
  4952. printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
  4953. " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
  4954. dev = alloc_etherdev(sizeof(struct fe_priv));
  4955. err = -ENOMEM;
  4956. if (!dev)
  4957. goto out;
  4958. np = netdev_priv(dev);
  4959. np->dev = dev;
  4960. np->pci_dev = pci_dev;
  4961. spin_lock_init(&np->lock);
  4962. SET_NETDEV_DEV(dev, &pci_dev->dev);
  4963. init_timer(&np->oom_kick);
  4964. np->oom_kick.data = (unsigned long) dev;
  4965. np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
  4966. init_timer(&np->nic_poll);
  4967. np->nic_poll.data = (unsigned long) dev;
  4968. np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
  4969. init_timer(&np->stats_poll);
  4970. np->stats_poll.data = (unsigned long) dev;
  4971. np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
  4972. err = pci_enable_device(pci_dev);
  4973. if (err)
  4974. goto out_free;
  4975. pci_set_master(pci_dev);
  4976. err = pci_request_regions(pci_dev, DRV_NAME);
  4977. if (err < 0)
  4978. goto out_disable;
  4979. if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
  4980. np->register_size = NV_PCI_REGSZ_VER3;
  4981. else if (id->driver_data & DEV_HAS_STATISTICS_V1)
  4982. np->register_size = NV_PCI_REGSZ_VER2;
  4983. else
  4984. np->register_size = NV_PCI_REGSZ_VER1;
  4985. err = -EINVAL;
  4986. addr = 0;
  4987. for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
  4988. dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
  4989. pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
  4990. pci_resource_len(pci_dev, i),
  4991. pci_resource_flags(pci_dev, i));
  4992. if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
  4993. pci_resource_len(pci_dev, i) >= np->register_size) {
  4994. addr = pci_resource_start(pci_dev, i);
  4995. break;
  4996. }
  4997. }
  4998. if (i == DEVICE_COUNT_RESOURCE) {
  4999. dev_printk(KERN_INFO, &pci_dev->dev,
  5000. "Couldn't find register window\n");
  5001. goto out_relreg;
  5002. }
  5003. /* copy of driver data */
  5004. np->driver_data = id->driver_data;
  5005. /* copy of device id */
  5006. np->device_id = id->device;
  5007. /* handle different descriptor versions */
  5008. if (id->driver_data & DEV_HAS_HIGH_DMA) {
  5009. /* packet format 3: supports 40-bit addressing */
  5010. np->desc_ver = DESC_VER_3;
  5011. np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
  5012. if (dma_64bit) {
  5013. if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
  5014. dev_printk(KERN_INFO, &pci_dev->dev,
  5015. "64-bit DMA failed, using 32-bit addressing\n");
  5016. else
  5017. dev->features |= NETIF_F_HIGHDMA;
  5018. if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
  5019. dev_printk(KERN_INFO, &pci_dev->dev,
  5020. "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
  5021. }
  5022. }
  5023. } else if (id->driver_data & DEV_HAS_LARGEDESC) {
  5024. /* packet format 2: supports jumbo frames */
  5025. np->desc_ver = DESC_VER_2;
  5026. np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
  5027. } else {
  5028. /* original packet format */
  5029. np->desc_ver = DESC_VER_1;
  5030. np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
  5031. }
  5032. np->pkt_limit = NV_PKTLIMIT_1;
  5033. if (id->driver_data & DEV_HAS_LARGEDESC)
  5034. np->pkt_limit = NV_PKTLIMIT_2;
  5035. if (id->driver_data & DEV_HAS_CHECKSUM) {
  5036. np->rx_csum = 1;
  5037. np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
  5038. dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
  5039. dev->features |= NETIF_F_TSO;
  5040. }
  5041. np->vlanctl_bits = 0;
  5042. if (id->driver_data & DEV_HAS_VLAN) {
  5043. np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
  5044. dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
  5045. }
  5046. np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
  5047. if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
  5048. (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
  5049. (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
  5050. np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
  5051. }
  5052. err = -ENOMEM;
  5053. np->base = ioremap(addr, np->register_size);
  5054. if (!np->base)
  5055. goto out_relreg;
  5056. dev->base_addr = (unsigned long)np->base;
  5057. dev->irq = pci_dev->irq;
  5058. np->rx_ring_size = RX_RING_DEFAULT;
  5059. np->tx_ring_size = TX_RING_DEFAULT;
  5060. if (!nv_optimized(np)) {
  5061. np->rx_ring.orig = pci_alloc_consistent(pci_dev,
  5062. sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
  5063. &np->ring_addr);
  5064. if (!np->rx_ring.orig)
  5065. goto out_unmap;
  5066. np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
  5067. } else {
  5068. np->rx_ring.ex = pci_alloc_consistent(pci_dev,
  5069. sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
  5070. &np->ring_addr);
  5071. if (!np->rx_ring.ex)
  5072. goto out_unmap;
  5073. np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
  5074. }
  5075. np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
  5076. np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
  5077. if (!np->rx_skb || !np->tx_skb)
  5078. goto out_freering;
  5079. if (!nv_optimized(np))
  5080. dev->netdev_ops = &nv_netdev_ops;
  5081. else
  5082. dev->netdev_ops = &nv_netdev_ops_optimized;
  5083. #ifdef CONFIG_FORCEDETH_NAPI
  5084. netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
  5085. #endif
  5086. SET_ETHTOOL_OPS(dev, &ops);
  5087. dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
  5088. pci_set_drvdata(pci_dev, dev);
  5089. /* read the mac address */
  5090. base = get_hwbase(dev);
  5091. np->orig_mac[0] = readl(base + NvRegMacAddrA);
  5092. np->orig_mac[1] = readl(base + NvRegMacAddrB);
  5093. /* check the workaround bit for correct mac address order */
  5094. txreg = readl(base + NvRegTransmitPoll);
  5095. if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
  5096. /* mac address is already in correct order */
  5097. dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
  5098. dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
  5099. dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
  5100. dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
  5101. dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
  5102. dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
  5103. } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
  5104. /* mac address is already in correct order */
  5105. dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
  5106. dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
  5107. dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
  5108. dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
  5109. dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
  5110. dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
  5111. /*
  5112. * Set orig mac address back to the reversed version.
  5113. * This flag will be cleared during low power transition.
  5114. * Therefore, we should always put back the reversed address.
  5115. */
  5116. np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
  5117. (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
  5118. np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
  5119. } else {
  5120. /* need to reverse mac address to correct order */
  5121. dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
  5122. dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
  5123. dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
  5124. dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
  5125. dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
  5126. dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
  5127. writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
  5128. printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
  5129. }
  5130. memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
  5131. if (!is_valid_ether_addr(dev->perm_addr)) {
  5132. /*
  5133. * Bad mac address. At least one bios sets the mac address
  5134. * to 01:23:45:67:89:ab
  5135. */
  5136. dev_printk(KERN_ERR, &pci_dev->dev,
  5137. "Invalid Mac address detected: %pM\n",
  5138. dev->dev_addr);
  5139. dev_printk(KERN_ERR, &pci_dev->dev,
  5140. "Please complain to your hardware vendor. Switching to a random MAC.\n");
  5141. dev->dev_addr[0] = 0x00;
  5142. dev->dev_addr[1] = 0x00;
  5143. dev->dev_addr[2] = 0x6c;
  5144. get_random_bytes(&dev->dev_addr[3], 3);
  5145. }
	dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
		pci_name(pci_dev), dev->dev_addr);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* Workaround current PCI init glitch: wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	device_init_wakeup(&pci_dev->dev, 1);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* msi-x has had reported issues when modifying the irqmask
		 * (as in the case of napi); therefore, disable it for now
		 */
#ifndef CONFIG_FORCEDETH_NAPI
		np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
	}

	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
		/* start off in throughput mode */
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		/* remove support for msix mode */
		np->msi_flags &= ~NV_MSI_X_CAPABLE;
	} else {
		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	}
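	/*
	 * Aside (an assumption, based on how msi_flags is consumed elsewhere
	 * in this driver): the low bits OR'ed in above encode the number of
	 * MSI-X vectors to request: one shared vector in CPU mode, three
	 * (rx, tx, other) in throughput mode.
	 */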
	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0) {
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
			}
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
				pci_name(pci_dev), np->mac_in_use);
			/* has the management unit already set up the phy? */
			if (np->mac_in_use &&
			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
				/* phy is inited by mgmt unit */
				phyinitialized = 1;
				dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
					pci_name(pci_dev));
			} else {
				/* we need to init the phy */
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: probe: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zeros on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
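	/*
	 * Note: phyaddr = i & 0x1f while i runs from 1 to 32, so MII
	 * addresses 1..31 are probed first and address 0 is tried last
	 * (address 0 acts as a broadcast address on some PHYs). Falling
	 * out of the loop with i == 33 means no PHY responded.
	 */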
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "probe: Could not find a valid PHY.\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}
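	/*
	 * Aside (an assumption): PHY_GIGABIT appears to correspond to the
	 * "extended status" bit in the BMSR, which 1000 Mb/s capable PHYs
	 * set, so its presence is treated as gigabit capability above.
	 */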
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0],
		   dev->dev_addr[1],
		   dev->dev_addr[2],
		   dev->dev_addr[3],
		   dev->dev_addr[4],
		   dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
		   "csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
		   "vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
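/*
 * Background (an assumption, from the matching setup applied in phy_init()):
 * when the phy_cross module parameter disabled crossover detection on a
 * Realtek 8201, the block above writes the default value back and restarts
 * autonegotiation, leaving the PHY in its power-on behaviour for whatever
 * claims it next.
 */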
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	nv_restore_mac_addr(pci_dev);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		/* Gross hack: close the device completely here;
		 * nv_resume() will reopen it.
		 */
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
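/*
 * Note on the save/restore loops: the bound is inclusive (<=), so
 * register_size/sizeof(u32) + 1 words are copied; nv_resume() below must
 * (and does) walk exactly the same range when writing them back.
 */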
static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i*sizeof(u32));

	if (np->driver_data & DEV_NEED_MSI_FIX)
		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	/* restore phy state, including autoneg */
	phy_init(dev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}

static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	/*
	 * Restore the MAC so a kernel started by kexec won't get confused.
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF) {
		nv_restore_mac_addr(pdev);
	}

	pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * so only put the device into D3 if we really go for poweroff.
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
static struct pci_device_id pci_tbl[] = {
	{ /* nForce Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x01C3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce2 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0066),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00D6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0086),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x008C),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00E6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00DF),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0056),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0057),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0037),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0038),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0268),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0269),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0372),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0373),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume = nv_resume,
	.shutdown = nv_shutdown,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
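/*
 * Worked example, assuming the formula above means
 * value = (time_in_micro_secs * 100) / 2^10: a poll_interval of 100
 * corresponds to 100 * 2^10 / 100 = 1024 us, i.e. roughly one timer
 * interrupt per millisecond.
 */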
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);