/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.97"
#define DRV_MODULE_RELDATE	"December 10, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
  80. /* hardware minimum and maximum for a single frame's data payload */
  81. #define TG3_MIN_MTU 60
  82. #define TG3_MAX_MTU(tp) \
  83. ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
  84. /* These numbers seem to be hard coded in the NIC firmware somehow.
  85. * You can't change the ring sizes, but you can change where you place
  86. * them in the NIC onboard memory.
  87. */
  88. #define TG3_RX_RING_SIZE 512
  89. #define TG3_DEF_RX_RING_PENDING 200
  90. #define TG3_RX_JUMBO_RING_SIZE 256
  91. #define TG3_DEF_RX_JUMBO_RING_PENDING 100
  92. /* Do not place this n-ring entries value into the tp struct itself,
  93. * we really want to expose these constants to GCC so that modulo et
  94. * al. operations are done with shifts and masks instead of with
  95. * hw multiply/modulo instructions. Another solution would be to
  96. * replace things like '% foo' with '& (foo - 1)'.
  97. */
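/* For example, since TG3_TX_RING_SIZE below is 512 (a power of two),
 * "idx % TG3_TX_RING_SIZE" reduces to "idx & 511", which is exactly
 * the form the NEXT_TX() macro uses.
 */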
  98. #define TG3_RX_RCB_RING_SIZE(tp) \
  99. ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
  100. #define TG3_TX_RING_SIZE 512
  101. #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
  102. #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
  103. TG3_RX_RING_SIZE)
  104. #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
  105. TG3_RX_JUMBO_RING_SIZE)
  106. #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
  107. TG3_RX_RCB_RING_SIZE(tp))
  108. #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
  109. TG3_TX_RING_SIZE)
  110. #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
  111. #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
  112. #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
  113. /* minimum number of free TX descriptors required to wake up TX process */
  114. #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
  115. #define TG3_RAW_IP_ALIGN 2
  116. /* number of ETHTOOL_GSTATS u64's */
  117. #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
  118. #define TG3_NUM_TEST 6
  119. #define FIRMWARE_TG3 "tigon/tg3.bin"
  120. #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
  121. #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
  122. static char version[] __devinitdata =
  123. DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  124. MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
  125. MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
  126. MODULE_LICENSE("GPL");
  127. MODULE_VERSION(DRV_MODULE_VERSION);
  128. MODULE_FIRMWARE(FIRMWARE_TG3);
  129. MODULE_FIRMWARE(FIRMWARE_TG3TSO);
  130. MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
  131. static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
  132. module_param(tg3_debug, int, 0);
  133. MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
  134. static struct pci_device_id tg3_pci_tbl[] = {
  135. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
  136. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
  137. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
  138. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
  139. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
  140. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
  141. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
  142. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
  143. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
  144. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
  145. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
  146. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
  147. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
  148. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
  149. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
  150. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
  151. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
  152. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
  153. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
  154. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
  155. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
  156. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
  157. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
  158. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
  159. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
  160. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
  161. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
  162. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
  163. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
  164. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
  165. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
  166. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
  167. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
  168. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
  169. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
  170. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
  171. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
  172. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
  173. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
  174. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
  175. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
  176. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
  177. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
  178. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
  179. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
  180. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
  181. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
  182. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
  183. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
  184. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
  185. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
  186. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
  187. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
  188. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
  189. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
  190. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
  191. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
  192. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
  193. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
  194. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
  195. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
  196. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
  197. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
  198. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
  199. {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
  200. {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
  201. {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
  202. {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
  203. {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
  204. {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
  205. {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
  206. {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
  207. {}
  208. };
  209. MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
  210. static const struct {
  211. const char string[ETH_GSTRING_LEN];
  212. } ethtool_stats_keys[TG3_NUM_STATS] = {
  213. { "rx_octets" },
  214. { "rx_fragments" },
  215. { "rx_ucast_packets" },
  216. { "rx_mcast_packets" },
  217. { "rx_bcast_packets" },
  218. { "rx_fcs_errors" },
  219. { "rx_align_errors" },
  220. { "rx_xon_pause_rcvd" },
  221. { "rx_xoff_pause_rcvd" },
  222. { "rx_mac_ctrl_rcvd" },
  223. { "rx_xoff_entered" },
  224. { "rx_frame_too_long_errors" },
  225. { "rx_jabbers" },
  226. { "rx_undersize_packets" },
  227. { "rx_in_length_errors" },
  228. { "rx_out_length_errors" },
  229. { "rx_64_or_less_octet_packets" },
  230. { "rx_65_to_127_octet_packets" },
  231. { "rx_128_to_255_octet_packets" },
  232. { "rx_256_to_511_octet_packets" },
  233. { "rx_512_to_1023_octet_packets" },
  234. { "rx_1024_to_1522_octet_packets" },
  235. { "rx_1523_to_2047_octet_packets" },
  236. { "rx_2048_to_4095_octet_packets" },
  237. { "rx_4096_to_8191_octet_packets" },
  238. { "rx_8192_to_9022_octet_packets" },
  239. { "tx_octets" },
  240. { "tx_collisions" },
  241. { "tx_xon_sent" },
  242. { "tx_xoff_sent" },
  243. { "tx_flow_control" },
  244. { "tx_mac_errors" },
  245. { "tx_single_collisions" },
  246. { "tx_mult_collisions" },
  247. { "tx_deferred" },
  248. { "tx_excessive_collisions" },
  249. { "tx_late_collisions" },
  250. { "tx_collide_2times" },
  251. { "tx_collide_3times" },
  252. { "tx_collide_4times" },
  253. { "tx_collide_5times" },
  254. { "tx_collide_6times" },
  255. { "tx_collide_7times" },
  256. { "tx_collide_8times" },
  257. { "tx_collide_9times" },
  258. { "tx_collide_10times" },
  259. { "tx_collide_11times" },
  260. { "tx_collide_12times" },
  261. { "tx_collide_13times" },
  262. { "tx_collide_14times" },
  263. { "tx_collide_15times" },
  264. { "tx_ucast_packets" },
  265. { "tx_mcast_packets" },
  266. { "tx_bcast_packets" },
  267. { "tx_carrier_sense_errors" },
  268. { "tx_discards" },
  269. { "tx_errors" },
  270. { "dma_writeq_full" },
  271. { "dma_write_prioq_full" },
  272. { "rxbds_empty" },
  273. { "rx_discards" },
  274. { "rx_errors" },
  275. { "rx_threshold_hit" },
  276. { "dma_readq_full" },
  277. { "dma_read_prioq_full" },
  278. { "tx_comp_queue_full" },
  279. { "ring_set_send_prod_index" },
  280. { "ring_status_update" },
  281. { "nic_irqs" },
  282. { "nic_avoided_irqs" },
  283. { "nic_tx_threshold_hit" }
  284. };
  285. static const struct {
  286. const char string[ETH_GSTRING_LEN];
  287. } ethtool_test_keys[TG3_NUM_TEST] = {
  288. { "nvram test (online) " },
  289. { "link test (online) " },
  290. { "register test (offline)" },
  291. { "memory test (offline)" },
  292. { "loopback test (offline)" },
  293. { "interrupt test (offline)" },
  294. };
  295. static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
  296. {
  297. writel(val, tp->regs + off);
  298. }
  299. static u32 tg3_read32(struct tg3 *tp, u32 off)
  300. {
  301. return (readl(tp->regs + off));
  302. }
  303. static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
  304. {
  305. writel(val, tp->aperegs + off);
  306. }
  307. static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
  308. {
  309. return (readl(tp->aperegs + off));
  310. }
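/* Indirect register access (the mechanism used by the helpers below):
 * the target register offset is written to the TG3PCI_REG_BASE_ADDR
 * window in PCI config space, and the data is then moved through
 * TG3PCI_REG_DATA, all under indirect_lock.
 */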
  311. static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
  312. {
  313. unsigned long flags;
  314. spin_lock_irqsave(&tp->indirect_lock, flags);
  315. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
  316. pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
  317. spin_unlock_irqrestore(&tp->indirect_lock, flags);
  318. }
  319. static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
  320. {
  321. writel(val, tp->regs + off);
  322. readl(tp->regs + off);
  323. }
  324. static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
  325. {
  326. unsigned long flags;
  327. u32 val;
  328. spin_lock_irqsave(&tp->indirect_lock, flags);
  329. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
  330. pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
  331. spin_unlock_irqrestore(&tp->indirect_lock, flags);
  332. return val;
  333. }
  334. static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
  335. {
  336. unsigned long flags;
  337. if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
  338. pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
  339. TG3_64BIT_REG_LOW, val);
  340. return;
  341. }
  342. if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
  343. pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
  344. TG3_64BIT_REG_LOW, val);
  345. return;
  346. }
  347. spin_lock_irqsave(&tp->indirect_lock, flags);
  348. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
  349. pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
  350. spin_unlock_irqrestore(&tp->indirect_lock, flags);
  351. /* In indirect mode when disabling interrupts, we also need
  352. * to clear the interrupt bit in the GRC local ctrl register.
  353. */
  354. if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
  355. (val == 0x1)) {
  356. pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
  357. tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
  358. }
  359. }
  360. static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
  361. {
  362. unsigned long flags;
  363. u32 val;
  364. spin_lock_irqsave(&tp->indirect_lock, flags);
  365. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
  366. pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
  367. spin_unlock_irqrestore(&tp->indirect_lock, flags);
  368. return val;
  369. }
  370. /* usec_wait specifies the wait time in usec when writing to certain registers
  371. * where it is unsafe to read back the register without some delay.
  372. * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
  373. * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
  374. */
  375. static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
  376. {
  377. if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
  378. (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
  379. /* Non-posted methods */
  380. tp->write32(tp, off, val);
  381. else {
  382. /* Posted method */
  383. tg3_write32(tp, off, val);
  384. if (usec_wait)
  385. udelay(usec_wait);
  386. tp->read32(tp, off);
  387. }
  388. /* Wait again after the read for the posted method to guarantee that
  389. * the wait time is met.
  390. */
  391. if (usec_wait)
  392. udelay(usec_wait);
  393. }
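/* A typical caller needing the post-write delay is tg3_switch_clocks()
 * below, e.g.:
 *   tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 * which guarantees 40 usec between the clock register write and the
 * next register access.
 */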
  394. static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
  395. {
  396. tp->write32_mbox(tp, off, val);
  397. if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
  398. !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
  399. tp->read32_mbox(tp, off);
  400. }
  401. static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
  402. {
  403. void __iomem *mbox = tp->regs + off;
  404. writel(val, mbox);
  405. if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
  406. writel(val, mbox);
  407. if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
  408. readl(mbox);
  409. }
  410. static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
  411. {
  412. return (readl(tp->regs + off + GRCMBOX_BASE));
  413. }
  414. static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
  415. {
  416. writel(val, tp->regs + off + GRCMBOX_BASE);
  417. }
  418. #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
  419. #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
  420. #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
  421. #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
  422. #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
  423. #define tw32(reg,val) tp->write32(tp, reg, val)
  424. #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
  425. #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
  426. #define tr32(reg) tp->read32(tp, reg)
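/* NIC SRAM accesses below go through a memory window: the SRAM offset
 * is written to TG3PCI_MEM_WIN_BASE_ADDR and the data moves through
 * TG3PCI_MEM_WIN_DATA (via PCI config space when
 * TG3_FLAG_SRAM_USE_CONFIG is set, otherwise via MMIO); the window
 * base is always restored to zero afterwards.
 */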
  427. static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
  428. {
  429. unsigned long flags;
  430. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
  431. (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
  432. return;
  433. spin_lock_irqsave(&tp->indirect_lock, flags);
  434. if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
  435. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
  436. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  437. /* Always leave this as zero. */
  438. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
  439. } else {
  440. tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
  441. tw32_f(TG3PCI_MEM_WIN_DATA, val);
  442. /* Always leave this as zero. */
  443. tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  444. }
  445. spin_unlock_irqrestore(&tp->indirect_lock, flags);
  446. }
  447. static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
  448. {
  449. unsigned long flags;
  450. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
  451. (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
  452. *val = 0;
  453. return;
  454. }
  455. spin_lock_irqsave(&tp->indirect_lock, flags);
  456. if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
  457. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
  458. pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  459. /* Always leave this as zero. */
  460. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
  461. } else {
  462. tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
  463. *val = tr32(TG3PCI_MEM_WIN_DATA);
  464. /* Always leave this as zero. */
  465. tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  466. }
  467. spin_unlock_irqrestore(&tp->indirect_lock, flags);
  468. }
  469. static void tg3_ape_lock_init(struct tg3 *tp)
  470. {
  471. int i;
472. /* Make sure the driver isn't holding any stale locks. */
  473. for (i = 0; i < 8; i++)
  474. tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
  475. APE_LOCK_GRANT_DRIVER);
  476. }
  477. static int tg3_ape_lock(struct tg3 *tp, int locknum)
  478. {
  479. int i, off;
  480. int ret = 0;
  481. u32 status;
  482. if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
  483. return 0;
  484. switch (locknum) {
  485. case TG3_APE_LOCK_GRC:
  486. case TG3_APE_LOCK_MEM:
  487. break;
  488. default:
  489. return -EINVAL;
  490. }
  491. off = 4 * locknum;
  492. tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
  493. /* Wait for up to 1 millisecond to acquire lock. */
  494. for (i = 0; i < 100; i++) {
  495. status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
  496. if (status == APE_LOCK_GRANT_DRIVER)
  497. break;
  498. udelay(10);
  499. }
  500. if (status != APE_LOCK_GRANT_DRIVER) {
  501. /* Revoke the lock request. */
  502. tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
  503. APE_LOCK_GRANT_DRIVER);
  504. ret = -EBUSY;
  505. }
  506. return ret;
  507. }
  508. static void tg3_ape_unlock(struct tg3 *tp, int locknum)
  509. {
  510. int off;
  511. if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
  512. return;
  513. switch (locknum) {
  514. case TG3_APE_LOCK_GRC:
  515. case TG3_APE_LOCK_MEM:
  516. break;
  517. default:
  518. return;
  519. }
  520. off = 4 * locknum;
  521. tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
  522. }
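/* A minimal usage sketch (tg3_ape_lock() returns 0 on success):
 *   if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *           ... access APE shared memory ...
 *           tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *   }
 */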
  523. static void tg3_disable_ints(struct tg3 *tp)
  524. {
  525. tw32(TG3PCI_MISC_HOST_CTRL,
  526. (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
  527. tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
  528. }
  529. static inline void tg3_cond_int(struct tg3 *tp)
  530. {
  531. if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
  532. (tp->hw_status->status & SD_STATUS_UPDATED))
  533. tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
  534. else
  535. tw32(HOSTCC_MODE, tp->coalesce_mode |
  536. (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
  537. }
  538. static void tg3_enable_ints(struct tg3 *tp)
  539. {
  540. tp->irq_sync = 0;
  541. wmb();
  542. tw32(TG3PCI_MISC_HOST_CTRL,
  543. (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
  544. tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
  545. (tp->last_tag << 24));
  546. if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
  547. tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
  548. (tp->last_tag << 24));
  549. tg3_cond_int(tp);
  550. }
  551. static inline unsigned int tg3_has_work(struct tg3 *tp)
  552. {
  553. struct tg3_hw_status *sblk = tp->hw_status;
  554. unsigned int work_exists = 0;
  555. /* check for phy events */
  556. if (!(tp->tg3_flags &
  557. (TG3_FLAG_USE_LINKCHG_REG |
  558. TG3_FLAG_POLL_SERDES))) {
  559. if (sblk->status & SD_STATUS_LINK_CHG)
  560. work_exists = 1;
  561. }
  562. /* check for RX/TX work to do */
  563. if (sblk->idx[0].tx_consumer != tp->tx_cons ||
  564. sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
  565. work_exists = 1;
  566. return work_exists;
  567. }
  568. /* tg3_restart_ints
  569. * similar to tg3_enable_ints, but it accurately determines whether there
  570. * is new work pending and can return without flushing the PIO write
  571. * which reenables interrupts
  572. */
  573. static void tg3_restart_ints(struct tg3 *tp)
  574. {
  575. tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
  576. tp->last_tag << 24);
  577. mmiowb();
  578. /* When doing tagged status, this work check is unnecessary.
  579. * The last_tag we write above tells the chip which piece of
  580. * work we've completed.
  581. */
  582. if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
  583. tg3_has_work(tp))
  584. tw32(HOSTCC_MODE, tp->coalesce_mode |
  585. (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
  586. }
  587. static inline void tg3_netif_stop(struct tg3 *tp)
  588. {
  589. tp->dev->trans_start = jiffies; /* prevent tx timeout */
  590. napi_disable(&tp->napi);
  591. netif_tx_disable(tp->dev);
  592. }
  593. static inline void tg3_netif_start(struct tg3 *tp)
  594. {
  595. netif_wake_queue(tp->dev);
  596. /* NOTE: unconditional netif_wake_queue is only appropriate
  597. * so long as all callers are assured to have free tx slots
  598. * (such as after tg3_init_hw)
  599. */
  600. napi_enable(&tp->napi);
  601. tp->hw_status->status |= SD_STATUS_UPDATED;
  602. tg3_enable_ints(tp);
  603. }
  604. static void tg3_switch_clocks(struct tg3 *tp)
  605. {
  606. u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
  607. u32 orig_clock_ctrl;
  608. if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
  609. (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
  610. return;
  611. orig_clock_ctrl = clock_ctrl;
  612. clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
  613. CLOCK_CTRL_CLKRUN_OENABLE |
  614. 0x1f);
  615. tp->pci_clock_ctrl = clock_ctrl;
  616. if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
  617. if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
  618. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  619. clock_ctrl | CLOCK_CTRL_625_CORE, 40);
  620. }
  621. } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
  622. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  623. clock_ctrl |
  624. (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
  625. 40);
  626. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  627. clock_ctrl | (CLOCK_CTRL_ALTCLK),
  628. 40);
  629. }
  630. tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
  631. }
  632. #define PHY_BUSY_LOOPS 5000
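/* With PHY_BUSY_LOOPS at 5000 and a udelay(10) per iteration in the
 * polling loops below, the MI completion wait in tg3_readphy() and
 * tg3_writephy() is bounded at roughly 50 ms.
 */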
  633. static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
  634. {
  635. u32 frame_val;
  636. unsigned int loops;
  637. int ret;
  638. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  639. tw32_f(MAC_MI_MODE,
  640. (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
  641. udelay(80);
  642. }
  643. *val = 0x0;
  644. frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
  645. MI_COM_PHY_ADDR_MASK);
  646. frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
  647. MI_COM_REG_ADDR_MASK);
  648. frame_val |= (MI_COM_CMD_READ | MI_COM_START);
  649. tw32_f(MAC_MI_COM, frame_val);
  650. loops = PHY_BUSY_LOOPS;
  651. while (loops != 0) {
  652. udelay(10);
  653. frame_val = tr32(MAC_MI_COM);
  654. if ((frame_val & MI_COM_BUSY) == 0) {
  655. udelay(5);
  656. frame_val = tr32(MAC_MI_COM);
  657. break;
  658. }
  659. loops -= 1;
  660. }
  661. ret = -EBUSY;
  662. if (loops != 0) {
  663. *val = frame_val & MI_COM_DATA_MASK;
  664. ret = 0;
  665. }
  666. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  667. tw32_f(MAC_MI_MODE, tp->mi_mode);
  668. udelay(80);
  669. }
  670. return ret;
  671. }
  672. static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
  673. {
  674. u32 frame_val;
  675. unsigned int loops;
  676. int ret;
  677. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
  678. (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
  679. return 0;
  680. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  681. tw32_f(MAC_MI_MODE,
  682. (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
  683. udelay(80);
  684. }
  685. frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
  686. MI_COM_PHY_ADDR_MASK);
  687. frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
  688. MI_COM_REG_ADDR_MASK);
  689. frame_val |= (val & MI_COM_DATA_MASK);
  690. frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
  691. tw32_f(MAC_MI_COM, frame_val);
  692. loops = PHY_BUSY_LOOPS;
  693. while (loops != 0) {
  694. udelay(10);
  695. frame_val = tr32(MAC_MI_COM);
  696. if ((frame_val & MI_COM_BUSY) == 0) {
  697. udelay(5);
  698. frame_val = tr32(MAC_MI_COM);
  699. break;
  700. }
  701. loops -= 1;
  702. }
  703. ret = -EBUSY;
  704. if (loops != 0)
  705. ret = 0;
  706. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  707. tw32_f(MAC_MI_MODE, tp->mi_mode);
  708. udelay(80);
  709. }
  710. return ret;
  711. }
  712. static int tg3_bmcr_reset(struct tg3 *tp)
  713. {
  714. u32 phy_control;
  715. int limit, err;
  716. /* OK, reset it, and poll the BMCR_RESET bit until it
  717. * clears or we time out.
  718. */
  719. phy_control = BMCR_RESET;
  720. err = tg3_writephy(tp, MII_BMCR, phy_control);
  721. if (err != 0)
  722. return -EBUSY;
  723. limit = 5000;
  724. while (limit--) {
  725. err = tg3_readphy(tp, MII_BMCR, &phy_control);
  726. if (err != 0)
  727. return -EBUSY;
  728. if ((phy_control & BMCR_RESET) == 0) {
  729. udelay(40);
  730. break;
  731. }
  732. udelay(10);
  733. }
  734. if (limit < 0)
  735. return -EBUSY;
  736. return 0;
  737. }
  738. static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
  739. {
  740. struct tg3 *tp = bp->priv;
  741. u32 val;
  742. if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
  743. return -EAGAIN;
  744. if (tg3_readphy(tp, reg, &val))
  745. return -EIO;
  746. return val;
  747. }
  748. static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
  749. {
  750. struct tg3 *tp = bp->priv;
  751. if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
  752. return -EAGAIN;
  753. if (tg3_writephy(tp, reg, val))
  754. return -EIO;
  755. return 0;
  756. }
  757. static int tg3_mdio_reset(struct mii_bus *bp)
  758. {
  759. return 0;
  760. }
  761. static void tg3_mdio_config_5785(struct tg3 *tp)
  762. {
  763. u32 val;
  764. struct phy_device *phydev;
  765. phydev = tp->mdio_bus->phy_map[PHY_ADDR];
  766. switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
  767. case TG3_PHY_ID_BCM50610:
  768. val = MAC_PHYCFG2_50610_LED_MODES;
  769. break;
  770. case TG3_PHY_ID_BCMAC131:
  771. val = MAC_PHYCFG2_AC131_LED_MODES;
  772. break;
  773. case TG3_PHY_ID_RTL8211C:
  774. val = MAC_PHYCFG2_RTL8211C_LED_MODES;
  775. break;
  776. case TG3_PHY_ID_RTL8201E:
  777. val = MAC_PHYCFG2_RTL8201E_LED_MODES;
  778. break;
  779. default:
  780. return;
  781. }
  782. if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
  783. tw32(MAC_PHYCFG2, val);
  784. val = tr32(MAC_PHYCFG1);
  785. val &= ~MAC_PHYCFG1_RGMII_INT;
  786. tw32(MAC_PHYCFG1, val);
  787. return;
  788. }
  789. if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
  790. val |= MAC_PHYCFG2_EMODE_MASK_MASK |
  791. MAC_PHYCFG2_FMODE_MASK_MASK |
  792. MAC_PHYCFG2_GMODE_MASK_MASK |
  793. MAC_PHYCFG2_ACT_MASK_MASK |
  794. MAC_PHYCFG2_QUAL_MASK_MASK |
  795. MAC_PHYCFG2_INBAND_ENABLE;
  796. tw32(MAC_PHYCFG2, val);
  797. val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
  798. MAC_PHYCFG1_RGMII_SND_STAT_EN);
  799. if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
  800. if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
  801. val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
  802. if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
  803. val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
  804. }
  805. tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
  806. val = tr32(MAC_EXT_RGMII_MODE);
  807. val &= ~(MAC_RGMII_MODE_RX_INT_B |
  808. MAC_RGMII_MODE_RX_QUALITY |
  809. MAC_RGMII_MODE_RX_ACTIVITY |
  810. MAC_RGMII_MODE_RX_ENG_DET |
  811. MAC_RGMII_MODE_TX_ENABLE |
  812. MAC_RGMII_MODE_TX_LOWPWR |
  813. MAC_RGMII_MODE_TX_RESET);
  814. if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
  815. if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
  816. val |= MAC_RGMII_MODE_RX_INT_B |
  817. MAC_RGMII_MODE_RX_QUALITY |
  818. MAC_RGMII_MODE_RX_ACTIVITY |
  819. MAC_RGMII_MODE_RX_ENG_DET;
  820. if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
  821. val |= MAC_RGMII_MODE_TX_ENABLE |
  822. MAC_RGMII_MODE_TX_LOWPWR |
  823. MAC_RGMII_MODE_TX_RESET;
  824. }
  825. tw32(MAC_EXT_RGMII_MODE, val);
  826. }
  827. static void tg3_mdio_start(struct tg3 *tp)
  828. {
  829. if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
  830. mutex_lock(&tp->mdio_bus->mdio_lock);
  831. tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
  832. mutex_unlock(&tp->mdio_bus->mdio_lock);
  833. }
  834. tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
  835. tw32_f(MAC_MI_MODE, tp->mi_mode);
  836. udelay(80);
  837. if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
  838. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
  839. tg3_mdio_config_5785(tp);
  840. }
  841. static void tg3_mdio_stop(struct tg3 *tp)
  842. {
  843. if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
  844. mutex_lock(&tp->mdio_bus->mdio_lock);
  845. tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
  846. mutex_unlock(&tp->mdio_bus->mdio_lock);
  847. }
  848. }
  849. static int tg3_mdio_init(struct tg3 *tp)
  850. {
  851. int i;
  852. u32 reg;
  853. struct phy_device *phydev;
  854. tg3_mdio_start(tp);
  855. if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
  856. (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
  857. return 0;
  858. tp->mdio_bus = mdiobus_alloc();
  859. if (tp->mdio_bus == NULL)
  860. return -ENOMEM;
  861. tp->mdio_bus->name = "tg3 mdio bus";
  862. snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
  863. (tp->pdev->bus->number << 8) | tp->pdev->devfn);
  864. tp->mdio_bus->priv = tp;
  865. tp->mdio_bus->parent = &tp->pdev->dev;
  866. tp->mdio_bus->read = &tg3_mdio_read;
  867. tp->mdio_bus->write = &tg3_mdio_write;
  868. tp->mdio_bus->reset = &tg3_mdio_reset;
  869. tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
  870. tp->mdio_bus->irq = &tp->mdio_irq[0];
  871. for (i = 0; i < PHY_MAX_ADDR; i++)
  872. tp->mdio_bus->irq[i] = PHY_POLL;
  873. /* The bus registration will look for all the PHYs on the mdio bus.
  874. * Unfortunately, it does not ensure the PHY is powered up before
  875. * accessing the PHY ID registers. A chip reset is the
876. * quickest way to bring the device back to an operational state.
  877. */
  878. if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
  879. tg3_bmcr_reset(tp);
  880. i = mdiobus_register(tp->mdio_bus);
  881. if (i) {
  882. printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
  883. tp->dev->name, i);
  884. mdiobus_free(tp->mdio_bus);
  885. return i;
  886. }
  887. phydev = tp->mdio_bus->phy_map[PHY_ADDR];
  888. if (!phydev || !phydev->drv) {
  889. printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
  890. mdiobus_unregister(tp->mdio_bus);
  891. mdiobus_free(tp->mdio_bus);
  892. return -ENODEV;
  893. }
  894. switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
  895. case TG3_PHY_ID_BCM57780:
  896. phydev->interface = PHY_INTERFACE_MODE_GMII;
  897. break;
  898. case TG3_PHY_ID_BCM50610:
  899. if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
  900. phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
  901. if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
  902. phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
  903. if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
  904. phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
  905. /* fallthru */
  906. case TG3_PHY_ID_RTL8211C:
  907. phydev->interface = PHY_INTERFACE_MODE_RGMII;
  908. break;
  909. case TG3_PHY_ID_RTL8201E:
  910. case TG3_PHY_ID_BCMAC131:
  911. phydev->interface = PHY_INTERFACE_MODE_MII;
  912. break;
  913. }
  914. tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
  915. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
  916. tg3_mdio_config_5785(tp);
  917. return 0;
  918. }
  919. static void tg3_mdio_fini(struct tg3 *tp)
  920. {
  921. if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
  922. tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
  923. mdiobus_unregister(tp->mdio_bus);
  924. mdiobus_free(tp->mdio_bus);
  925. tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
  926. }
  927. }
  928. /* tp->lock is held. */
  929. static inline void tg3_generate_fw_event(struct tg3 *tp)
  930. {
  931. u32 val;
  932. val = tr32(GRC_RX_CPU_EVENT);
  933. val |= GRC_RX_CPU_DRIVER_EVENT;
  934. tw32_f(GRC_RX_CPU_EVENT, val);
  935. tp->last_event_jiffies = jiffies;
  936. }
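/* tg3_wait_for_event_ack() below pairs with this: before the next
 * command is queued it waits for GRC_RX_CPU_DRIVER_EVENT to clear, or
 * for TG3_FW_EVENT_TIMEOUT_USEC to elapse since last_event_jiffies.
 */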
  937. #define TG3_FW_EVENT_TIMEOUT_USEC 2500
  938. /* tp->lock is held. */
  939. static void tg3_wait_for_event_ack(struct tg3 *tp)
  940. {
  941. int i;
  942. unsigned int delay_cnt;
  943. long time_remain;
  944. /* If enough time has passed, no wait is necessary. */
  945. time_remain = (long)(tp->last_event_jiffies + 1 +
  946. usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
  947. (long)jiffies;
  948. if (time_remain < 0)
  949. return;
  950. /* Check if we can shorten the wait time. */
  951. delay_cnt = jiffies_to_usecs(time_remain);
  952. if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
  953. delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
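/* The loop below polls in udelay(8) steps, so convert the remaining
 * time in microseconds into an iteration count, rounding up.
 */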
  954. delay_cnt = (delay_cnt >> 3) + 1;
  955. for (i = 0; i < delay_cnt; i++) {
  956. if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
  957. break;
  958. udelay(8);
  959. }
  960. }
  961. /* tp->lock is held. */
  962. static void tg3_ump_link_report(struct tg3 *tp)
  963. {
  964. u32 reg;
  965. u32 val;
  966. if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
  967. !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
  968. return;
  969. tg3_wait_for_event_ack(tp);
  970. tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
  971. tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
  972. val = 0;
  973. if (!tg3_readphy(tp, MII_BMCR, &reg))
  974. val = reg << 16;
  975. if (!tg3_readphy(tp, MII_BMSR, &reg))
  976. val |= (reg & 0xffff);
  977. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
  978. val = 0;
  979. if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
  980. val = reg << 16;
  981. if (!tg3_readphy(tp, MII_LPA, &reg))
  982. val |= (reg & 0xffff);
  983. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
  984. val = 0;
  985. if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
  986. if (!tg3_readphy(tp, MII_CTRL1000, &reg))
  987. val = reg << 16;
  988. if (!tg3_readphy(tp, MII_STAT1000, &reg))
  989. val |= (reg & 0xffff);
  990. }
  991. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
  992. if (!tg3_readphy(tp, MII_PHYADDR, &reg))
  993. val = reg << 16;
  994. else
  995. val = 0;
  996. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
  997. tg3_generate_fw_event(tp);
  998. }
  999. static void tg3_link_report(struct tg3 *tp)
  1000. {
  1001. if (!netif_carrier_ok(tp->dev)) {
  1002. if (netif_msg_link(tp))
  1003. printk(KERN_INFO PFX "%s: Link is down.\n",
  1004. tp->dev->name);
  1005. tg3_ump_link_report(tp);
  1006. } else if (netif_msg_link(tp)) {
  1007. printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
  1008. tp->dev->name,
  1009. (tp->link_config.active_speed == SPEED_1000 ?
  1010. 1000 :
  1011. (tp->link_config.active_speed == SPEED_100 ?
  1012. 100 : 10)),
  1013. (tp->link_config.active_duplex == DUPLEX_FULL ?
  1014. "full" : "half"));
  1015. printk(KERN_INFO PFX
  1016. "%s: Flow control is %s for TX and %s for RX.\n",
  1017. tp->dev->name,
  1018. (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
  1019. "on" : "off",
  1020. (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
  1021. "on" : "off");
  1022. tg3_ump_link_report(tp);
  1023. }
  1024. }
  1025. static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
  1026. {
  1027. u16 miireg;
  1028. if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
  1029. miireg = ADVERTISE_PAUSE_CAP;
  1030. else if (flow_ctrl & FLOW_CTRL_TX)
  1031. miireg = ADVERTISE_PAUSE_ASYM;
  1032. else if (flow_ctrl & FLOW_CTRL_RX)
  1033. miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  1034. else
  1035. miireg = 0;
  1036. return miireg;
  1037. }
  1038. static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
  1039. {
  1040. u16 miireg;
  1041. if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
  1042. miireg = ADVERTISE_1000XPAUSE;
  1043. else if (flow_ctrl & FLOW_CTRL_TX)
  1044. miireg = ADVERTISE_1000XPSE_ASYM;
  1045. else if (flow_ctrl & FLOW_CTRL_RX)
  1046. miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
  1047. else
  1048. miireg = 0;
  1049. return miireg;
  1050. }
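/* The helper below resolves the local and link-partner 1000BASE-X
 * pause advertisements into the driver's FLOW_CTRL_TX/FLOW_CTRL_RX
 * bits, following the usual symmetric/asymmetric pause resolution
 * rules.
 */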
  1051. static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
  1052. {
  1053. u8 cap = 0;
  1054. if (lcladv & ADVERTISE_1000XPAUSE) {
  1055. if (lcladv & ADVERTISE_1000XPSE_ASYM) {
  1056. if (rmtadv & LPA_1000XPAUSE)
  1057. cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
  1058. else if (rmtadv & LPA_1000XPAUSE_ASYM)
  1059. cap = FLOW_CTRL_RX;
  1060. } else {
  1061. if (rmtadv & LPA_1000XPAUSE)
  1062. cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
  1063. }
  1064. } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
  1065. if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
  1066. cap = FLOW_CTRL_TX;
  1067. }
  1068. return cap;
  1069. }
  1070. static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
  1071. {
  1072. u8 autoneg;
  1073. u8 flowctrl = 0;
  1074. u32 old_rx_mode = tp->rx_mode;
  1075. u32 old_tx_mode = tp->tx_mode;
  1076. if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
  1077. autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
  1078. else
  1079. autoneg = tp->link_config.autoneg;
  1080. if (autoneg == AUTONEG_ENABLE &&
  1081. (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
  1082. if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
  1083. flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
  1084. else
  1085. flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
  1086. } else
  1087. flowctrl = tp->link_config.flowctrl;
  1088. tp->link_config.active_flowctrl = flowctrl;
  1089. if (flowctrl & FLOW_CTRL_RX)
  1090. tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
  1091. else
  1092. tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
  1093. if (old_rx_mode != tp->rx_mode)
  1094. tw32_f(MAC_RX_MODE, tp->rx_mode);
  1095. if (flowctrl & FLOW_CTRL_TX)
  1096. tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
  1097. else
  1098. tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
  1099. if (old_tx_mode != tp->tx_mode)
  1100. tw32_f(MAC_TX_MODE, tp->tx_mode);
  1101. }
  1102. static void tg3_adjust_link(struct net_device *dev)
  1103. {
  1104. u8 oldflowctrl, linkmesg = 0;
  1105. u32 mac_mode, lcl_adv, rmt_adv;
  1106. struct tg3 *tp = netdev_priv(dev);
  1107. struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
  1108. spin_lock(&tp->lock);
  1109. mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
  1110. MAC_MODE_HALF_DUPLEX);
  1111. oldflowctrl = tp->link_config.active_flowctrl;
  1112. if (phydev->link) {
  1113. lcl_adv = 0;
  1114. rmt_adv = 0;
  1115. if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
  1116. mac_mode |= MAC_MODE_PORT_MODE_MII;
  1117. else
  1118. mac_mode |= MAC_MODE_PORT_MODE_GMII;
  1119. if (phydev->duplex == DUPLEX_HALF)
  1120. mac_mode |= MAC_MODE_HALF_DUPLEX;
  1121. else {
  1122. lcl_adv = tg3_advert_flowctrl_1000T(
  1123. tp->link_config.flowctrl);
  1124. if (phydev->pause)
  1125. rmt_adv = LPA_PAUSE_CAP;
  1126. if (phydev->asym_pause)
  1127. rmt_adv |= LPA_PAUSE_ASYM;
  1128. }
  1129. tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
  1130. } else
  1131. mac_mode |= MAC_MODE_PORT_MODE_GMII;
  1132. if (mac_mode != tp->mac_mode) {
  1133. tp->mac_mode = mac_mode;
  1134. tw32_f(MAC_MODE, tp->mac_mode);
  1135. udelay(40);
  1136. }
  1137. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
  1138. if (phydev->speed == SPEED_10)
  1139. tw32(MAC_MI_STAT,
  1140. MAC_MI_STAT_10MBPS_MODE |
  1141. MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
  1142. else
  1143. tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
  1144. }
  1145. if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
  1146. tw32(MAC_TX_LENGTHS,
  1147. ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  1148. (6 << TX_LENGTHS_IPG_SHIFT) |
  1149. (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
  1150. else
  1151. tw32(MAC_TX_LENGTHS,
  1152. ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  1153. (6 << TX_LENGTHS_IPG_SHIFT) |
  1154. (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
  1155. if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
  1156. (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
  1157. phydev->speed != tp->link_config.active_speed ||
  1158. phydev->duplex != tp->link_config.active_duplex ||
  1159. oldflowctrl != tp->link_config.active_flowctrl)
  1160. linkmesg = 1;
  1161. tp->link_config.active_speed = phydev->speed;
  1162. tp->link_config.active_duplex = phydev->duplex;
  1163. spin_unlock(&tp->lock);
  1164. if (linkmesg)
  1165. tg3_link_report(tp);
  1166. }
  1167. static int tg3_phy_init(struct tg3 *tp)
  1168. {
  1169. struct phy_device *phydev;
  1170. if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
  1171. return 0;
  1172. /* Bring the PHY back to a known state. */
  1173. tg3_bmcr_reset(tp);
  1174. phydev = tp->mdio_bus->phy_map[PHY_ADDR];
  1175. /* Attach the MAC to the PHY. */
  1176. phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
  1177. phydev->dev_flags, phydev->interface);
  1178. if (IS_ERR(phydev)) {
  1179. printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
  1180. return PTR_ERR(phydev);
  1181. }
  1182. /* Mask with MAC supported features. */
  1183. switch (phydev->interface) {
  1184. case PHY_INTERFACE_MODE_GMII:
  1185. case PHY_INTERFACE_MODE_RGMII:
  1186. if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
  1187. phydev->supported &= (PHY_GBIT_FEATURES |
  1188. SUPPORTED_Pause |
  1189. SUPPORTED_Asym_Pause);
  1190. break;
  1191. }
  1192. /* fallthru */
  1193. case PHY_INTERFACE_MODE_MII:
  1194. phydev->supported &= (PHY_BASIC_FEATURES |
  1195. SUPPORTED_Pause |
  1196. SUPPORTED_Asym_Pause);
  1197. break;
  1198. default:
  1199. phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
  1200. return -EINVAL;
  1201. }
  1202. tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
  1203. phydev->advertising = phydev->supported;
  1204. return 0;
  1205. }
  1206. static void tg3_phy_start(struct tg3 *tp)
  1207. {
  1208. struct phy_device *phydev;
  1209. if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
  1210. return;
  1211. phydev = tp->mdio_bus->phy_map[PHY_ADDR];
  1212. if (tp->link_config.phy_is_low_power) {
  1213. tp->link_config.phy_is_low_power = 0;
  1214. phydev->speed = tp->link_config.orig_speed;
  1215. phydev->duplex = tp->link_config.orig_duplex;
  1216. phydev->autoneg = tp->link_config.orig_autoneg;
  1217. phydev->advertising = tp->link_config.orig_advertising;
  1218. }
  1219. phy_start(phydev);
  1220. phy_start_aneg(phydev);
  1221. }
  1222. static void tg3_phy_stop(struct tg3 *tp)
  1223. {
  1224. if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
  1225. return;
  1226. phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
  1227. }
  1228. static void tg3_phy_fini(struct tg3 *tp)
  1229. {
  1230. if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
  1231. phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
  1232. tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
  1233. }
  1234. }
  1235. static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
  1236. {
  1237. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
  1238. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
  1239. }
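/* PHY DSP registers are reached indirectly: the DSP address is latched
 * through MII_TG3_DSP_ADDRESS and the value is then written through
 * MII_TG3_DSP_RW_PORT, the same pattern tg3_phy_apply_otp() and the
 * test-pattern routines below use.
 */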
  1240. static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
  1241. {
  1242. u32 reg;
  1243. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
  1244. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  1245. return;
  1246. reg = MII_TG3_MISC_SHDW_WREN |
  1247. MII_TG3_MISC_SHDW_SCR5_SEL |
  1248. MII_TG3_MISC_SHDW_SCR5_LPED |
  1249. MII_TG3_MISC_SHDW_SCR5_DLPTLM |
  1250. MII_TG3_MISC_SHDW_SCR5_SDTL |
  1251. MII_TG3_MISC_SHDW_SCR5_C125OE;
  1252. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
  1253. reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
  1254. tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
  1255. reg = MII_TG3_MISC_SHDW_WREN |
  1256. MII_TG3_MISC_SHDW_APD_SEL |
  1257. MII_TG3_MISC_SHDW_APD_WKTM_84MS;
  1258. if (enable)
  1259. reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
  1260. tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
  1261. }
  1262. static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
  1263. {
  1264. u32 phy;
  1265. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
  1266. (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
  1267. return;
  1268. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  1269. u32 ephy;
  1270. if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
  1271. tg3_writephy(tp, MII_TG3_EPHY_TEST,
  1272. ephy | MII_TG3_EPHY_SHADOW_EN);
  1273. if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
  1274. if (enable)
  1275. phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
  1276. else
  1277. phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
  1278. tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
  1279. }
  1280. tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
  1281. }
  1282. } else {
  1283. phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
  1284. MII_TG3_AUXCTL_SHDWSEL_MISC;
  1285. if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
  1286. !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
  1287. if (enable)
  1288. phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
  1289. else
  1290. phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
  1291. phy |= MII_TG3_AUXCTL_MISC_WREN;
  1292. tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
  1293. }
  1294. }
  1295. }
  1296. static void tg3_phy_set_wirespeed(struct tg3 *tp)
  1297. {
  1298. u32 val;
  1299. if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
  1300. return;
  1301. if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
  1302. !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
  1303. tg3_writephy(tp, MII_TG3_AUX_CTRL,
  1304. (val | (1 << 15) | (1 << 4)));
  1305. }
  1306. static void tg3_phy_apply_otp(struct tg3 *tp)
  1307. {
  1308. u32 otp, phy;
  1309. if (!tp->phy_otp)
  1310. return;
  1311. otp = tp->phy_otp;
  1312. /* Enable SM_DSP clock and tx 6dB coding. */
  1313. phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
  1314. MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
  1315. MII_TG3_AUXCTL_ACTL_TX_6DB;
  1316. tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
  1317. phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
  1318. phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
  1319. tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
  1320. phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
  1321. ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
  1322. tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
  1323. phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
  1324. phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
  1325. tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
  1326. phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
  1327. tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
  1328. phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
  1329. tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
  1330. phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
  1331. ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
  1332. tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
  1333. /* Turn off SM_DSP clock. */
  1334. phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
  1335. MII_TG3_AUXCTL_ACTL_TX_6DB;
  1336. tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
  1337. }
  1338. static int tg3_wait_macro_done(struct tg3 *tp)
  1339. {
  1340. int limit = 100;
  1341. while (limit--) {
  1342. u32 tmp32;
  1343. if (!tg3_readphy(tp, 0x16, &tmp32)) {
  1344. if ((tmp32 & 0x1000) == 0)
  1345. break;
  1346. }
  1347. }
  1348. if (limit < 0)
  1349. return -EBUSY;
  1350. return 0;
  1351. }
  1352. static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
  1353. {
  1354. static const u32 test_pat[4][6] = {
  1355. { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
  1356. { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
  1357. { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
  1358. { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
  1359. };
  1360. int chan;
  1361. for (chan = 0; chan < 4; chan++) {
  1362. int i;
  1363. tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
  1364. (chan * 0x2000) | 0x0200);
  1365. tg3_writephy(tp, 0x16, 0x0002);
  1366. for (i = 0; i < 6; i++)
  1367. tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
  1368. test_pat[chan][i]);
  1369. tg3_writephy(tp, 0x16, 0x0202);
  1370. if (tg3_wait_macro_done(tp)) {
  1371. *resetp = 1;
  1372. return -EBUSY;
  1373. }
  1374. tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
  1375. (chan * 0x2000) | 0x0200);
  1376. tg3_writephy(tp, 0x16, 0x0082);
  1377. if (tg3_wait_macro_done(tp)) {
  1378. *resetp = 1;
  1379. return -EBUSY;
  1380. }
  1381. tg3_writephy(tp, 0x16, 0x0802);
  1382. if (tg3_wait_macro_done(tp)) {
  1383. *resetp = 1;
  1384. return -EBUSY;
  1385. }
  1386. for (i = 0; i < 6; i += 2) {
  1387. u32 low, high;
  1388. if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
  1389. tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
  1390. tg3_wait_macro_done(tp)) {
  1391. *resetp = 1;
  1392. return -EBUSY;
  1393. }
  1394. low &= 0x7fff;
  1395. high &= 0x000f;
  1396. if (low != test_pat[chan][i] ||
  1397. high != test_pat[chan][i+1]) {
  1398. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
  1399. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
  1400. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
  1401. return -EBUSY;
  1402. }
  1403. }
  1404. }
  1405. return 0;
  1406. }
  1407. static int tg3_phy_reset_chanpat(struct tg3 *tp)
  1408. {
  1409. int chan;
  1410. for (chan = 0; chan < 4; chan++) {
  1411. int i;
  1412. tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
  1413. (chan * 0x2000) | 0x0200);
  1414. tg3_writephy(tp, 0x16, 0x0002);
  1415. for (i = 0; i < 6; i++)
  1416. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
  1417. tg3_writephy(tp, 0x16, 0x0202);
  1418. if (tg3_wait_macro_done(tp))
  1419. return -EBUSY;
  1420. }
  1421. return 0;
  1422. }
  1423. static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
  1424. {
  1425. u32 reg32, phy9_orig;
  1426. int retries, do_phy_reset, err;
  1427. retries = 10;
  1428. do_phy_reset = 1;
  1429. do {
  1430. if (do_phy_reset) {
  1431. err = tg3_bmcr_reset(tp);
  1432. if (err)
  1433. return err;
  1434. do_phy_reset = 0;
  1435. }
  1436. /* Disable transmitter and interrupt. */
  1437. if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
  1438. continue;
  1439. reg32 |= 0x3000;
  1440. tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1442. /* Set full-duplex, 1000 Mbps. */
  1442. tg3_writephy(tp, MII_BMCR,
  1443. BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
  1444. /* Set to master mode. */
  1445. if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
  1446. continue;
  1447. tg3_writephy(tp, MII_TG3_CTRL,
  1448. (MII_TG3_CTRL_AS_MASTER |
  1449. MII_TG3_CTRL_ENABLE_AS_MASTER));
  1450. /* Enable SM_DSP_CLOCK and 6dB. */
  1451. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
  1452. /* Block the PHY control access. */
  1453. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
  1454. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
  1455. err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
  1456. if (!err)
  1457. break;
  1458. } while (--retries);
  1459. err = tg3_phy_reset_chanpat(tp);
  1460. if (err)
  1461. return err;
  1462. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
  1463. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
  1464. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
  1465. tg3_writephy(tp, 0x16, 0x0000);
  1466. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
  1467. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
  1468. /* Set Extended packet length bit for jumbo frames */
  1469. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1470. } else {
  1472. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
  1473. }
  1474. tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
  1475. if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
  1476. reg32 &= ~0x3000;
  1477. tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
  1478. } else if (!err)
  1479. err = -EBUSY;
  1480. return err;
  1481. }
1482. /* This will fully reset the tigon3 PHY and then apply any
1483. * chip-specific workarounds needed afterwards.
1484. */
  1485. static int tg3_phy_reset(struct tg3 *tp)
  1486. {
  1487. u32 cpmuctrl;
  1488. u32 phy_status;
  1489. int err;
  1490. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  1491. u32 val;
  1492. val = tr32(GRC_MISC_CFG);
  1493. tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
  1494. udelay(40);
  1495. }
  1496. err = tg3_readphy(tp, MII_BMSR, &phy_status);
  1497. err |= tg3_readphy(tp, MII_BMSR, &phy_status);
  1498. if (err != 0)
  1499. return -EBUSY;
  1500. if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
  1501. netif_carrier_off(tp->dev);
  1502. tg3_link_report(tp);
  1503. }
  1504. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
  1505. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
  1506. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
  1507. err = tg3_phy_reset_5703_4_5(tp);
  1508. if (err)
  1509. return err;
  1510. goto out;
  1511. }
  1512. cpmuctrl = 0;
  1513. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
  1514. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
  1515. cpmuctrl = tr32(TG3_CPMU_CTRL);
  1516. if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
  1517. tw32(TG3_CPMU_CTRL,
  1518. cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
  1519. }
  1520. err = tg3_bmcr_reset(tp);
  1521. if (err)
  1522. return err;
  1523. if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
  1524. u32 phy;
  1525. phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
  1526. tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
  1527. tw32(TG3_CPMU_CTRL, cpmuctrl);
  1528. }
  1529. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
  1530. GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
  1531. u32 val;
  1532. val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
  1533. if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
  1534. CPMU_LSPD_1000MB_MACCLK_12_5) {
  1535. val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
  1536. udelay(40);
  1537. tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
  1538. }
  1539. }
  1540. tg3_phy_apply_otp(tp);
  1541. if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
  1542. tg3_phy_toggle_apd(tp, true);
  1543. else
  1544. tg3_phy_toggle_apd(tp, false);
  1545. out:
  1546. if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
  1547. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
  1548. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
  1549. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
  1550. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
  1551. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
  1552. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
  1553. }
  1554. if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
  1555. tg3_writephy(tp, 0x1c, 0x8d68);
  1556. tg3_writephy(tp, 0x1c, 0x8d68);
  1557. }
  1558. if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
  1559. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
  1560. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
  1561. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
  1562. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
  1563. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
  1564. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
  1565. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
  1566. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
  1567. }
  1568. else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
  1569. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
  1570. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
  1571. if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
  1572. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
  1573. tg3_writephy(tp, MII_TG3_TEST1,
  1574. MII_TG3_TEST1_TRIM_EN | 0x4);
  1575. } else
  1576. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
  1577. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
  1578. }
  1579. /* Set Extended packet length bit (bit 14) on all chips that */
  1580. /* support jumbo frames */
  1581. if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
  1582. /* Cannot do read-modify-write on 5401 */
  1583. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
  1584. } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
  1585. u32 phy_reg;
  1586. /* Set bit 14 with read-modify-write to preserve other bits */
  1587. if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
  1588. !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
  1589. tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
  1590. }
  1591. /* Set phy register 0x10 bit 0 to high fifo elasticity to support
  1592. * jumbo frames transmission.
  1593. */
  1594. if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
  1595. u32 phy_reg;
  1596. if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
  1597. tg3_writephy(tp, MII_TG3_EXT_CTRL,
  1598. phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
  1599. }
  1600. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  1601. /* adjust output voltage */
  1602. tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
  1603. }
  1604. tg3_phy_toggle_automdix(tp, 1);
  1605. tg3_phy_set_wirespeed(tp);
  1606. return 0;
  1607. }
  1608. static void tg3_frob_aux_power(struct tg3 *tp)
  1609. {
  1610. struct tg3 *tp_peer = tp;
  1611. if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
  1612. return;
  1613. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
  1614. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
  1615. struct net_device *dev_peer;
  1616. dev_peer = pci_get_drvdata(tp->pdev_peer);
  1617. /* remove_one() may have been run on the peer. */
  1618. if (!dev_peer)
  1619. tp_peer = tp;
  1620. else
  1621. tp_peer = netdev_priv(dev_peer);
  1622. }
  1623. if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
  1624. (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
  1625. (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
  1626. (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
  1627. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  1628. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
  1629. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  1630. (GRC_LCLCTRL_GPIO_OE0 |
  1631. GRC_LCLCTRL_GPIO_OE1 |
  1632. GRC_LCLCTRL_GPIO_OE2 |
  1633. GRC_LCLCTRL_GPIO_OUTPUT0 |
  1634. GRC_LCLCTRL_GPIO_OUTPUT1),
  1635. 100);
  1636. } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
  1637. /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
  1638. u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
  1639. GRC_LCLCTRL_GPIO_OE1 |
  1640. GRC_LCLCTRL_GPIO_OE2 |
  1641. GRC_LCLCTRL_GPIO_OUTPUT0 |
  1642. GRC_LCLCTRL_GPIO_OUTPUT1 |
  1643. tp->grc_local_ctrl;
  1644. tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
  1645. grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
  1646. tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
  1647. grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
  1648. tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
  1649. } else {
  1650. u32 no_gpio2;
  1651. u32 grc_local_ctrl = 0;
  1652. if (tp_peer != tp &&
  1653. (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
  1654. return;
  1655. /* Workaround to prevent overdrawing Amps. */
  1656. if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
  1657. ASIC_REV_5714) {
  1658. grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
  1659. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  1660. grc_local_ctrl, 100);
  1661. }
  1662. /* On 5753 and variants, GPIO2 cannot be used. */
  1663. no_gpio2 = tp->nic_sram_data_cfg &
  1664. NIC_SRAM_DATA_CFG_NO_GPIO2;
  1665. grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
  1666. GRC_LCLCTRL_GPIO_OE1 |
  1667. GRC_LCLCTRL_GPIO_OE2 |
  1668. GRC_LCLCTRL_GPIO_OUTPUT1 |
  1669. GRC_LCLCTRL_GPIO_OUTPUT2;
  1670. if (no_gpio2) {
  1671. grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
  1672. GRC_LCLCTRL_GPIO_OUTPUT2);
  1673. }
  1674. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  1675. grc_local_ctrl, 100);
  1676. grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
  1677. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  1678. grc_local_ctrl, 100);
  1679. if (!no_gpio2) {
  1680. grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
  1681. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  1682. grc_local_ctrl, 100);
  1683. }
  1684. }
  1685. } else {
  1686. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
  1687. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
  1688. if (tp_peer != tp &&
  1689. (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
  1690. return;
  1691. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  1692. (GRC_LCLCTRL_GPIO_OE1 |
  1693. GRC_LCLCTRL_GPIO_OUTPUT1), 100);
  1694. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  1695. GRC_LCLCTRL_GPIO_OE1, 100);
  1696. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  1697. (GRC_LCLCTRL_GPIO_OE1 |
  1698. GRC_LCLCTRL_GPIO_OUTPUT1), 100);
  1699. }
  1700. }
  1701. }
  1702. static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
  1703. {
  1704. if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
  1705. return 1;
  1706. else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
  1707. if (speed != SPEED_10)
  1708. return 1;
  1709. } else if (speed == SPEED_10)
  1710. return 1;
  1711. return 0;
  1712. }
  1713. static int tg3_setup_phy(struct tg3 *, int);
  1714. #define RESET_KIND_SHUTDOWN 0
  1715. #define RESET_KIND_INIT 1
  1716. #define RESET_KIND_SUSPEND 2
  1717. static void tg3_write_sig_post_reset(struct tg3 *, int);
  1718. static int tg3_halt_cpu(struct tg3 *, u32);
  1719. static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
  1720. {
  1721. u32 val;
  1722. if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
  1723. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
  1724. u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
  1725. u32 serdes_cfg = tr32(MAC_SERDES_CFG);
  1726. sg_dig_ctrl |=
  1727. SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
  1728. tw32(SG_DIG_CTRL, sg_dig_ctrl);
  1729. tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
  1730. }
  1731. return;
  1732. }
  1733. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  1734. tg3_bmcr_reset(tp);
  1735. val = tr32(GRC_MISC_CFG);
  1736. tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
  1737. udelay(40);
  1738. return;
  1739. } else if (do_low_power) {
  1740. tg3_writephy(tp, MII_TG3_EXT_CTRL,
  1741. MII_TG3_EXT_CTRL_FORCE_LED_OFF);
  1742. tg3_writephy(tp, MII_TG3_AUX_CTRL,
  1743. MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
  1744. MII_TG3_AUXCTL_PCTL_100TX_LPWR |
  1745. MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
  1746. MII_TG3_AUXCTL_PCTL_VREG_11V);
  1747. }
  1748. /* The PHY should not be powered down on some chips because
  1749. * of bugs.
  1750. */
  1751. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  1752. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
  1753. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
  1754. (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
  1755. return;
  1756. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
  1757. GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
  1758. val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
  1759. val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
  1760. val |= CPMU_LSPD_1000MB_MACCLK_12_5;
  1761. tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
  1762. }
  1763. tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
  1764. }
  1765. /* tp->lock is held. */
  1766. static int tg3_nvram_lock(struct tg3 *tp)
  1767. {
  1768. if (tp->tg3_flags & TG3_FLAG_NVRAM) {
  1769. int i;
  1770. if (tp->nvram_lock_cnt == 0) {
  1771. tw32(NVRAM_SWARB, SWARB_REQ_SET1);
  1772. for (i = 0; i < 8000; i++) {
  1773. if (tr32(NVRAM_SWARB) & SWARB_GNT1)
  1774. break;
  1775. udelay(20);
  1776. }
  1777. if (i == 8000) {
  1778. tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
  1779. return -ENODEV;
  1780. }
  1781. }
  1782. tp->nvram_lock_cnt++;
  1783. }
  1784. return 0;
  1785. }
  1786. /* tp->lock is held. */
  1787. static void tg3_nvram_unlock(struct tg3 *tp)
  1788. {
  1789. if (tp->tg3_flags & TG3_FLAG_NVRAM) {
  1790. if (tp->nvram_lock_cnt > 0)
  1791. tp->nvram_lock_cnt--;
  1792. if (tp->nvram_lock_cnt == 0)
  1793. tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
  1794. }
  1795. }
  1796. /* tp->lock is held. */
  1797. static void tg3_enable_nvram_access(struct tg3 *tp)
  1798. {
  1799. if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
  1800. !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
  1801. u32 nvaccess = tr32(NVRAM_ACCESS);
  1802. tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
  1803. }
  1804. }
  1805. /* tp->lock is held. */
  1806. static void tg3_disable_nvram_access(struct tg3 *tp)
  1807. {
  1808. if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
  1809. !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
  1810. u32 nvaccess = tr32(NVRAM_ACCESS);
  1811. tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
  1812. }
  1813. }
  1814. static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
  1815. u32 offset, u32 *val)
  1816. {
  1817. u32 tmp;
  1818. int i;
  1819. if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
  1820. return -EINVAL;
  1821. tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
  1822. EEPROM_ADDR_DEVID_MASK |
  1823. EEPROM_ADDR_READ);
  1824. tw32(GRC_EEPROM_ADDR,
  1825. tmp |
  1826. (0 << EEPROM_ADDR_DEVID_SHIFT) |
  1827. ((offset << EEPROM_ADDR_ADDR_SHIFT) &
  1828. EEPROM_ADDR_ADDR_MASK) |
  1829. EEPROM_ADDR_READ | EEPROM_ADDR_START);
  1830. for (i = 0; i < 1000; i++) {
  1831. tmp = tr32(GRC_EEPROM_ADDR);
  1832. if (tmp & EEPROM_ADDR_COMPLETE)
  1833. break;
  1834. msleep(1);
  1835. }
  1836. if (!(tmp & EEPROM_ADDR_COMPLETE))
  1837. return -EBUSY;
  1838. *val = tr32(GRC_EEPROM_DATA);
  1839. return 0;
  1840. }
  1841. #define NVRAM_CMD_TIMEOUT 10000
  1842. static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
  1843. {
  1844. int i;
  1845. tw32(NVRAM_CMD, nvram_cmd);
  1846. for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
  1847. udelay(10);
  1848. if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
  1849. udelay(10);
  1850. break;
  1851. }
  1852. }
  1853. if (i == NVRAM_CMD_TIMEOUT)
  1854. return -EBUSY;
  1855. return 0;
  1856. }
  1857. static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
  1858. {
  1859. if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
  1860. (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
  1861. (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
  1862. !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
  1863. (tp->nvram_jedecnum == JEDEC_ATMEL))
  1864. addr = ((addr / tp->nvram_pagesize) <<
  1865. ATMEL_AT45DB0X1B_PAGE_POS) +
  1866. (addr % tp->nvram_pagesize);
  1867. return addr;
  1868. }
  1869. static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
  1870. {
  1871. if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
  1872. (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
  1873. (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
  1874. !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
  1875. (tp->nvram_jedecnum == JEDEC_ATMEL))
  1876. addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
  1877. tp->nvram_pagesize) +
  1878. (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
  1879. return addr;
  1880. }
  1881. /* NOTE: Data read in from NVRAM is byteswapped according to
  1882. * the byteswapping settings for all other register accesses.
  1883. * tg3 devices are BE devices, so on a BE machine, the data
  1884. * returned will be exactly as it is seen in NVRAM. On a LE
  1885. * machine, the 32-bit value will be byteswapped.
  1886. */
  1887. static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
  1888. {
  1889. int ret;
  1890. if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
  1891. return tg3_nvram_read_using_eeprom(tp, offset, val);
  1892. offset = tg3_nvram_phys_addr(tp, offset);
  1893. if (offset > NVRAM_ADDR_MSK)
  1894. return -EINVAL;
  1895. ret = tg3_nvram_lock(tp);
  1896. if (ret)
  1897. return ret;
  1898. tg3_enable_nvram_access(tp);
  1899. tw32(NVRAM_ADDR, offset);
  1900. ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
  1901. NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
  1902. if (ret == 0)
  1903. *val = tr32(NVRAM_RDDATA);
  1904. tg3_disable_nvram_access(tp);
  1905. tg3_nvram_unlock(tp);
  1906. return ret;
  1907. }
  1908. static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
  1909. {
  1910. int err;
  1911. u32 tmp;
  1912. err = tg3_nvram_read(tp, offset, &tmp);
  1913. *val = swab32(tmp);
  1914. return err;
  1915. }
  1916. static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
  1917. {
  1918. u32 v;
  1919. int res = tg3_nvram_read_swab(tp, offset, &v);
  1920. if (!res)
  1921. *val = cpu_to_le32(v);
  1922. return res;
  1923. }
  1924. /* tp->lock is held. */
  1925. static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
  1926. {
  1927. u32 addr_high, addr_low;
  1928. int i;
  1929. addr_high = ((tp->dev->dev_addr[0] << 8) |
  1930. tp->dev->dev_addr[1]);
  1931. addr_low = ((tp->dev->dev_addr[2] << 24) |
  1932. (tp->dev->dev_addr[3] << 16) |
  1933. (tp->dev->dev_addr[4] << 8) |
  1934. (tp->dev->dev_addr[5] << 0));
  1935. for (i = 0; i < 4; i++) {
  1936. if (i == 1 && skip_mac_1)
  1937. continue;
  1938. tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
  1939. tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
  1940. }
  1941. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
  1942. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
  1943. for (i = 0; i < 12; i++) {
  1944. tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
  1945. tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
  1946. }
  1947. }
  1948. addr_high = (tp->dev->dev_addr[0] +
  1949. tp->dev->dev_addr[1] +
  1950. tp->dev->dev_addr[2] +
  1951. tp->dev->dev_addr[3] +
  1952. tp->dev->dev_addr[4] +
  1953. tp->dev->dev_addr[5]) &
  1954. TX_BACKOFF_SEED_MASK;
  1955. tw32(MAC_TX_BACKOFF_SEED, addr_high);
  1956. }
  1957. static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
  1958. {
  1959. u32 misc_host_ctrl;
  1960. bool device_should_wake, do_low_power;
  1961. /* Make sure register accesses (indirect or otherwise)
  1962. * will function correctly.
  1963. */
  1964. pci_write_config_dword(tp->pdev,
  1965. TG3PCI_MISC_HOST_CTRL,
  1966. tp->misc_host_ctrl);
  1967. switch (state) {
  1968. case PCI_D0:
  1969. pci_enable_wake(tp->pdev, state, false);
  1970. pci_set_power_state(tp->pdev, PCI_D0);
  1971. /* Switch out of Vaux if it is a NIC */
  1972. if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
  1973. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
  1974. return 0;
  1975. case PCI_D1:
  1976. case PCI_D2:
  1977. case PCI_D3hot:
  1978. break;
  1979. default:
  1980. printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
  1981. tp->dev->name, state);
  1982. return -EINVAL;
  1983. }
  1984. /* Restore the CLKREQ setting. */
  1985. if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
  1986. u16 lnkctl;
  1987. pci_read_config_word(tp->pdev,
  1988. tp->pcie_cap + PCI_EXP_LNKCTL,
  1989. &lnkctl);
  1990. lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
  1991. pci_write_config_word(tp->pdev,
  1992. tp->pcie_cap + PCI_EXP_LNKCTL,
  1993. lnkctl);
  1994. }
  1995. misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
  1996. tw32(TG3PCI_MISC_HOST_CTRL,
  1997. misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
  1998. device_should_wake = pci_pme_capable(tp->pdev, state) &&
  1999. device_may_wakeup(&tp->pdev->dev) &&
  2000. (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
  2001. if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
  2002. do_low_power = false;
  2003. if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
  2004. !tp->link_config.phy_is_low_power) {
  2005. struct phy_device *phydev;
  2006. u32 phyid, advertising;
  2007. phydev = tp->mdio_bus->phy_map[PHY_ADDR];
  2008. tp->link_config.phy_is_low_power = 1;
  2009. tp->link_config.orig_speed = phydev->speed;
  2010. tp->link_config.orig_duplex = phydev->duplex;
  2011. tp->link_config.orig_autoneg = phydev->autoneg;
  2012. tp->link_config.orig_advertising = phydev->advertising;
  2013. advertising = ADVERTISED_TP |
  2014. ADVERTISED_Pause |
  2015. ADVERTISED_Autoneg |
  2016. ADVERTISED_10baseT_Half;
  2017. if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
  2018. device_should_wake) {
  2019. if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
  2020. advertising |=
  2021. ADVERTISED_100baseT_Half |
  2022. ADVERTISED_100baseT_Full |
  2023. ADVERTISED_10baseT_Full;
  2024. else
  2025. advertising |= ADVERTISED_10baseT_Full;
  2026. }
  2027. phydev->advertising = advertising;
  2028. phy_start_aneg(phydev);
  2029. phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
  2030. if (phyid != TG3_PHY_ID_BCMAC131) {
  2031. phyid &= TG3_PHY_OUI_MASK;
  2032. if (phyid == TG3_PHY_OUI_1 ||
  2033. phyid == TG3_PHY_OUI_2 ||
  2034. phyid == TG3_PHY_OUI_3)
  2035. do_low_power = true;
  2036. }
  2037. }
  2038. } else {
  2039. do_low_power = true;
  2040. if (tp->link_config.phy_is_low_power == 0) {
  2041. tp->link_config.phy_is_low_power = 1;
  2042. tp->link_config.orig_speed = tp->link_config.speed;
  2043. tp->link_config.orig_duplex = tp->link_config.duplex;
  2044. tp->link_config.orig_autoneg = tp->link_config.autoneg;
  2045. }
  2046. if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
  2047. tp->link_config.speed = SPEED_10;
  2048. tp->link_config.duplex = DUPLEX_HALF;
  2049. tp->link_config.autoneg = AUTONEG_ENABLE;
  2050. tg3_setup_phy(tp, 0);
  2051. }
  2052. }
  2053. __tg3_set_mac_addr(tp, 0);
  2054. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  2055. u32 val;
  2056. val = tr32(GRC_VCPU_EXT_CTRL);
  2057. tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
  2058. } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
  2059. int i;
  2060. u32 val;
  2061. for (i = 0; i < 200; i++) {
  2062. tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
  2063. if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
  2064. break;
  2065. msleep(1);
  2066. }
  2067. }
  2068. if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
  2069. tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
  2070. WOL_DRV_STATE_SHUTDOWN |
  2071. WOL_DRV_WOL |
  2072. WOL_SET_MAGIC_PKT);
  2073. if (device_should_wake) {
  2074. u32 mac_mode;
  2075. if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
  2076. if (do_low_power) {
  2077. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
  2078. udelay(40);
  2079. }
  2080. if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
  2081. mac_mode = MAC_MODE_PORT_MODE_GMII;
  2082. else
  2083. mac_mode = MAC_MODE_PORT_MODE_MII;
  2084. mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
  2085. if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
  2086. ASIC_REV_5700) {
  2087. u32 speed = (tp->tg3_flags &
  2088. TG3_FLAG_WOL_SPEED_100MB) ?
  2089. SPEED_100 : SPEED_10;
  2090. if (tg3_5700_link_polarity(tp, speed))
  2091. mac_mode |= MAC_MODE_LINK_POLARITY;
  2092. else
  2093. mac_mode &= ~MAC_MODE_LINK_POLARITY;
  2094. }
  2095. } else {
  2096. mac_mode = MAC_MODE_PORT_MODE_TBI;
  2097. }
  2098. if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
  2099. tw32(MAC_LED_CTRL, tp->led_ctrl);
  2100. mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
  2101. if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
  2102. !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
  2103. ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
  2104. (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
  2105. mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
  2106. if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
  2107. mac_mode |= tp->mac_mode &
  2108. (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
  2109. if (mac_mode & MAC_MODE_APE_TX_EN)
  2110. mac_mode |= MAC_MODE_TDE_ENABLE;
  2111. }
  2112. tw32_f(MAC_MODE, mac_mode);
  2113. udelay(100);
  2114. tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
  2115. udelay(10);
  2116. }
  2117. if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
  2118. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  2119. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
  2120. u32 base_val;
  2121. base_val = tp->pci_clock_ctrl;
  2122. base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
  2123. CLOCK_CTRL_TXCLK_DISABLE);
  2124. tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
  2125. CLOCK_CTRL_PWRDOWN_PLL133, 40);
  2126. } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
  2127. (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
  2128. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
  2129. /* do nothing */
  2130. } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
  2131. (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
  2132. u32 newbits1, newbits2;
  2133. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  2134. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
  2135. newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
  2136. CLOCK_CTRL_TXCLK_DISABLE |
  2137. CLOCK_CTRL_ALTCLK);
  2138. newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
  2139. } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
  2140. newbits1 = CLOCK_CTRL_625_CORE;
  2141. newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
  2142. } else {
  2143. newbits1 = CLOCK_CTRL_ALTCLK;
  2144. newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
  2145. }
  2146. tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
  2147. 40);
  2148. tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
  2149. 40);
  2150. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  2151. u32 newbits3;
  2152. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  2153. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
  2154. newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
  2155. CLOCK_CTRL_TXCLK_DISABLE |
  2156. CLOCK_CTRL_44MHZ_CORE);
  2157. } else {
  2158. newbits3 = CLOCK_CTRL_44MHZ_CORE;
  2159. }
  2160. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  2161. tp->pci_clock_ctrl | newbits3, 40);
  2162. }
  2163. }
  2164. if (!(device_should_wake) &&
  2165. !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
  2166. tg3_power_down_phy(tp, do_low_power);
  2167. tg3_frob_aux_power(tp);
  2168. /* Workaround for unstable PLL clock */
  2169. if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
  2170. (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
  2171. u32 val = tr32(0x7d00);
  2172. val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
  2173. tw32(0x7d00, val);
  2174. if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
  2175. int err;
  2176. err = tg3_nvram_lock(tp);
  2177. tg3_halt_cpu(tp, RX_CPU_BASE);
  2178. if (!err)
  2179. tg3_nvram_unlock(tp);
  2180. }
  2181. }
  2182. tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
  2183. if (device_should_wake)
  2184. pci_enable_wake(tp->pdev, state, true);
  2185. /* Finally, set the new power state. */
  2186. pci_set_power_state(tp->pdev, state);
  2187. return 0;
  2188. }
  2189. static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
  2190. {
  2191. switch (val & MII_TG3_AUX_STAT_SPDMASK) {
  2192. case MII_TG3_AUX_STAT_10HALF:
  2193. *speed = SPEED_10;
  2194. *duplex = DUPLEX_HALF;
  2195. break;
  2196. case MII_TG3_AUX_STAT_10FULL:
  2197. *speed = SPEED_10;
  2198. *duplex = DUPLEX_FULL;
  2199. break;
  2200. case MII_TG3_AUX_STAT_100HALF:
  2201. *speed = SPEED_100;
  2202. *duplex = DUPLEX_HALF;
  2203. break;
  2204. case MII_TG3_AUX_STAT_100FULL:
  2205. *speed = SPEED_100;
  2206. *duplex = DUPLEX_FULL;
  2207. break;
  2208. case MII_TG3_AUX_STAT_1000HALF:
  2209. *speed = SPEED_1000;
  2210. *duplex = DUPLEX_HALF;
  2211. break;
  2212. case MII_TG3_AUX_STAT_1000FULL:
  2213. *speed = SPEED_1000;
  2214. *duplex = DUPLEX_FULL;
  2215. break;
  2216. default:
  2217. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  2218. *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
  2219. SPEED_10;
  2220. *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
  2221. DUPLEX_HALF;
  2222. break;
  2223. }
  2224. *speed = SPEED_INVALID;
  2225. *duplex = DUPLEX_INVALID;
  2226. break;
  2227. }
  2228. }
  2229. static void tg3_phy_copper_begin(struct tg3 *tp)
  2230. {
  2231. u32 new_adv;
  2232. int i;
  2233. if (tp->link_config.phy_is_low_power) {
  2234. /* Entering low power mode. Disable gigabit and
  2235. * 100baseT advertisements.
  2236. */
  2237. tg3_writephy(tp, MII_TG3_CTRL, 0);
  2238. new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
  2239. ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
  2240. if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
  2241. new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
  2242. tg3_writephy(tp, MII_ADVERTISE, new_adv);
  2243. } else if (tp->link_config.speed == SPEED_INVALID) {
  2244. if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
  2245. tp->link_config.advertising &=
  2246. ~(ADVERTISED_1000baseT_Half |
  2247. ADVERTISED_1000baseT_Full);
  2248. new_adv = ADVERTISE_CSMA;
  2249. if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
  2250. new_adv |= ADVERTISE_10HALF;
  2251. if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
  2252. new_adv |= ADVERTISE_10FULL;
  2253. if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
  2254. new_adv |= ADVERTISE_100HALF;
  2255. if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
  2256. new_adv |= ADVERTISE_100FULL;
  2257. new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
  2258. tg3_writephy(tp, MII_ADVERTISE, new_adv);
  2259. if (tp->link_config.advertising &
  2260. (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
  2261. new_adv = 0;
  2262. if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
  2263. new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
  2264. if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
  2265. new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
  2266. if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
  2267. (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
  2268. tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
  2269. new_adv |= (MII_TG3_CTRL_AS_MASTER |
  2270. MII_TG3_CTRL_ENABLE_AS_MASTER);
  2271. tg3_writephy(tp, MII_TG3_CTRL, new_adv);
  2272. } else {
  2273. tg3_writephy(tp, MII_TG3_CTRL, 0);
  2274. }
  2275. } else {
  2276. new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
  2277. new_adv |= ADVERTISE_CSMA;
  2278. /* Asking for a specific link mode. */
  2279. if (tp->link_config.speed == SPEED_1000) {
  2280. tg3_writephy(tp, MII_ADVERTISE, new_adv);
  2281. if (tp->link_config.duplex == DUPLEX_FULL)
  2282. new_adv = MII_TG3_CTRL_ADV_1000_FULL;
  2283. else
  2284. new_adv = MII_TG3_CTRL_ADV_1000_HALF;
  2285. if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
  2286. tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
  2287. new_adv |= (MII_TG3_CTRL_AS_MASTER |
  2288. MII_TG3_CTRL_ENABLE_AS_MASTER);
  2289. } else {
  2290. if (tp->link_config.speed == SPEED_100) {
  2291. if (tp->link_config.duplex == DUPLEX_FULL)
  2292. new_adv |= ADVERTISE_100FULL;
  2293. else
  2294. new_adv |= ADVERTISE_100HALF;
  2295. } else {
  2296. if (tp->link_config.duplex == DUPLEX_FULL)
  2297. new_adv |= ADVERTISE_10FULL;
  2298. else
  2299. new_adv |= ADVERTISE_10HALF;
  2300. }
  2301. tg3_writephy(tp, MII_ADVERTISE, new_adv);
  2302. new_adv = 0;
  2303. }
  2304. tg3_writephy(tp, MII_TG3_CTRL, new_adv);
  2305. }
  2306. if (tp->link_config.autoneg == AUTONEG_DISABLE &&
  2307. tp->link_config.speed != SPEED_INVALID) {
  2308. u32 bmcr, orig_bmcr;
  2309. tp->link_config.active_speed = tp->link_config.speed;
  2310. tp->link_config.active_duplex = tp->link_config.duplex;
  2311. bmcr = 0;
  2312. switch (tp->link_config.speed) {
  2313. default:
  2314. case SPEED_10:
  2315. break;
  2316. case SPEED_100:
  2317. bmcr |= BMCR_SPEED100;
  2318. break;
  2319. case SPEED_1000:
  2320. bmcr |= TG3_BMCR_SPEED1000;
  2321. break;
  2322. }
  2323. if (tp->link_config.duplex == DUPLEX_FULL)
  2324. bmcr |= BMCR_FULLDPLX;
  2325. if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
  2326. (bmcr != orig_bmcr)) {
  2327. tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
  2328. for (i = 0; i < 1500; i++) {
  2329. u32 tmp;
  2330. udelay(10);
  2331. if (tg3_readphy(tp, MII_BMSR, &tmp) ||
  2332. tg3_readphy(tp, MII_BMSR, &tmp))
  2333. continue;
  2334. if (!(tmp & BMSR_LSTATUS)) {
  2335. udelay(40);
  2336. break;
  2337. }
  2338. }
  2339. tg3_writephy(tp, MII_BMCR, bmcr);
  2340. udelay(40);
  2341. }
  2342. } else {
  2343. tg3_writephy(tp, MII_BMCR,
  2344. BMCR_ANENABLE | BMCR_ANRESTART);
  2345. }
  2346. }
  2347. static int tg3_init_5401phy_dsp(struct tg3 *tp)
  2348. {
  2349. int err;
  2350. /* Turn off tap power management. */
  2351. /* Set Extended packet length bit */
  2352. err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
  2353. err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
  2354. err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
  2355. err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
  2356. err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
  2357. err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
  2358. err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
  2359. err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
  2360. err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
  2361. err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
  2362. err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
  2363. udelay(40);
  2364. return err;
  2365. }
  2366. static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
  2367. {
  2368. u32 adv_reg, all_mask = 0;
  2369. if (mask & ADVERTISED_10baseT_Half)
  2370. all_mask |= ADVERTISE_10HALF;
  2371. if (mask & ADVERTISED_10baseT_Full)
  2372. all_mask |= ADVERTISE_10FULL;
  2373. if (mask & ADVERTISED_100baseT_Half)
  2374. all_mask |= ADVERTISE_100HALF;
  2375. if (mask & ADVERTISED_100baseT_Full)
  2376. all_mask |= ADVERTISE_100FULL;
  2377. if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
  2378. return 0;
  2379. if ((adv_reg & all_mask) != all_mask)
  2380. return 0;
  2381. if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
  2382. u32 tg3_ctrl;
  2383. all_mask = 0;
  2384. if (mask & ADVERTISED_1000baseT_Half)
  2385. all_mask |= ADVERTISE_1000HALF;
  2386. if (mask & ADVERTISED_1000baseT_Full)
  2387. all_mask |= ADVERTISE_1000FULL;
  2388. if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
  2389. return 0;
  2390. if ((tg3_ctrl & all_mask) != all_mask)
  2391. return 0;
  2392. }
  2393. return 1;
  2394. }
  2395. static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
  2396. {
  2397. u32 curadv, reqadv;
  2398. if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
  2399. return 1;
  2400. curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
  2401. reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
  2402. if (tp->link_config.active_duplex == DUPLEX_FULL) {
  2403. if (curadv != reqadv)
  2404. return 0;
  2405. if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
  2406. tg3_readphy(tp, MII_LPA, rmtadv);
  2407. } else {
  2408. /* Reprogram the advertisement register, even if it
  2409. * does not affect the current link. If the link
  2410. * gets renegotiated in the future, we can save an
  2411. * additional renegotiation cycle by advertising
  2412. * it correctly in the first place.
  2413. */
  2414. if (curadv != reqadv) {
  2415. *lcladv &= ~(ADVERTISE_PAUSE_CAP |
  2416. ADVERTISE_PAUSE_ASYM);
  2417. tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
  2418. }
  2419. }
  2420. return 1;
  2421. }
  2422. static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
  2423. {
  2424. int current_link_up;
  2425. u32 bmsr, dummy;
  2426. u32 lcl_adv, rmt_adv;
  2427. u16 current_speed;
  2428. u8 current_duplex;
  2429. int i, err;
  2430. tw32(MAC_EVENT, 0);
  2431. tw32_f(MAC_STATUS,
  2432. (MAC_STATUS_SYNC_CHANGED |
  2433. MAC_STATUS_CFG_CHANGED |
  2434. MAC_STATUS_MI_COMPLETION |
  2435. MAC_STATUS_LNKSTATE_CHANGED));
  2436. udelay(40);
  2437. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  2438. tw32_f(MAC_MI_MODE,
  2439. (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
  2440. udelay(80);
  2441. }
  2442. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
  2443. /* Some third-party PHYs need to be reset on link going
  2444. * down.
  2445. */
  2446. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
  2447. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
  2448. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
  2449. netif_carrier_ok(tp->dev)) {
  2450. tg3_readphy(tp, MII_BMSR, &bmsr);
  2451. if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
  2452. !(bmsr & BMSR_LSTATUS))
  2453. force_reset = 1;
  2454. }
  2455. if (force_reset)
  2456. tg3_phy_reset(tp);
  2457. if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
  2458. tg3_readphy(tp, MII_BMSR, &bmsr);
  2459. if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
  2460. !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
  2461. bmsr = 0;
  2462. if (!(bmsr & BMSR_LSTATUS)) {
  2463. err = tg3_init_5401phy_dsp(tp);
  2464. if (err)
  2465. return err;
  2466. tg3_readphy(tp, MII_BMSR, &bmsr);
  2467. for (i = 0; i < 1000; i++) {
  2468. udelay(10);
  2469. if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
  2470. (bmsr & BMSR_LSTATUS)) {
  2471. udelay(40);
  2472. break;
  2473. }
  2474. }
  2475. if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
  2476. !(bmsr & BMSR_LSTATUS) &&
  2477. tp->link_config.active_speed == SPEED_1000) {
  2478. err = tg3_phy_reset(tp);
  2479. if (!err)
  2480. err = tg3_init_5401phy_dsp(tp);
  2481. if (err)
  2482. return err;
  2483. }
  2484. }
  2485. } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
  2486. tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
  2487. /* 5701 {A0,B0} CRC bug workaround */
  2488. tg3_writephy(tp, 0x15, 0x0a75);
  2489. tg3_writephy(tp, 0x1c, 0x8c68);
  2490. tg3_writephy(tp, 0x1c, 0x8d68);
  2491. tg3_writephy(tp, 0x1c, 0x8c68);
  2492. }
  2493. /* Clear pending interrupts... */
  2494. tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
  2495. tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
  2496. if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
  2497. tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
  2498. else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
  2499. tg3_writephy(tp, MII_TG3_IMASK, ~0);
  2500. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  2501. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
  2502. if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
  2503. tg3_writephy(tp, MII_TG3_EXT_CTRL,
  2504. MII_TG3_EXT_CTRL_LNK3_LED_MODE);
  2505. else
  2506. tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
  2507. }
  2508. current_link_up = 0;
  2509. current_speed = SPEED_INVALID;
  2510. current_duplex = DUPLEX_INVALID;
  2511. if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
  2512. u32 val;
  2513. tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
  2514. tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
  2515. if (!(val & (1 << 10))) {
  2516. val |= (1 << 10);
  2517. tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
  2518. goto relink;
  2519. }
  2520. }
  2521. bmsr = 0;
  2522. for (i = 0; i < 100; i++) {
  2523. tg3_readphy(tp, MII_BMSR, &bmsr);
  2524. if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
  2525. (bmsr & BMSR_LSTATUS))
  2526. break;
  2527. udelay(40);
  2528. }
  2529. if (bmsr & BMSR_LSTATUS) {
  2530. u32 aux_stat, bmcr;
  2531. tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
  2532. for (i = 0; i < 2000; i++) {
  2533. udelay(10);
  2534. if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
  2535. aux_stat)
  2536. break;
  2537. }
  2538. tg3_aux_stat_to_speed_duplex(tp, aux_stat,
  2539. &current_speed,
  2540. &current_duplex);
  2541. bmcr = 0;
  2542. for (i = 0; i < 200; i++) {
  2543. tg3_readphy(tp, MII_BMCR, &bmcr);
  2544. if (tg3_readphy(tp, MII_BMCR, &bmcr))
  2545. continue;
  2546. if (bmcr && bmcr != 0x7fff)
  2547. break;
  2548. udelay(10);
  2549. }
  2550. lcl_adv = 0;
  2551. rmt_adv = 0;
  2552. tp->link_config.active_speed = current_speed;
  2553. tp->link_config.active_duplex = current_duplex;
  2554. if (tp->link_config.autoneg == AUTONEG_ENABLE) {
  2555. if ((bmcr & BMCR_ANENABLE) &&
  2556. tg3_copper_is_advertising_all(tp,
  2557. tp->link_config.advertising)) {
  2558. if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
  2559. &rmt_adv))
  2560. current_link_up = 1;
  2561. }
  2562. } else {
  2563. if (!(bmcr & BMCR_ANENABLE) &&
  2564. tp->link_config.speed == current_speed &&
  2565. tp->link_config.duplex == current_duplex &&
  2566. tp->link_config.flowctrl ==
  2567. tp->link_config.active_flowctrl) {
  2568. current_link_up = 1;
  2569. }
  2570. }
  2571. if (current_link_up == 1 &&
  2572. tp->link_config.active_duplex == DUPLEX_FULL)
  2573. tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
  2574. }
  2575. relink:
  2576. if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
  2577. u32 tmp;
  2578. tg3_phy_copper_begin(tp);
  2579. tg3_readphy(tp, MII_BMSR, &tmp);
  2580. if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
  2581. (tmp & BMSR_LSTATUS))
  2582. current_link_up = 1;
  2583. }
  2584. tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
  2585. if (current_link_up == 1) {
  2586. if (tp->link_config.active_speed == SPEED_100 ||
  2587. tp->link_config.active_speed == SPEED_10)
  2588. tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
  2589. else
  2590. tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
  2591. } else
  2592. tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
  2593. tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
  2594. if (tp->link_config.active_duplex == DUPLEX_HALF)
  2595. tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
  2596. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
  2597. if (current_link_up == 1 &&
  2598. tg3_5700_link_polarity(tp, tp->link_config.active_speed))
  2599. tp->mac_mode |= MAC_MODE_LINK_POLARITY;
  2600. else
  2601. tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
  2602. }
  2603. /* ??? Without this setting Netgear GA302T PHY does not
  2604. * ??? send/receive packets...
  2605. */
  2606. if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
  2607. tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
  2608. tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
  2609. tw32_f(MAC_MI_MODE, tp->mi_mode);
  2610. udelay(80);
  2611. }
  2612. tw32_f(MAC_MODE, tp->mac_mode);
  2613. udelay(40);
  2614. if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
  2615. /* Polled via timer. */
  2616. tw32_f(MAC_EVENT, 0);
  2617. } else {
  2618. tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
  2619. }
  2620. udelay(40);
  2621. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
  2622. current_link_up == 1 &&
  2623. tp->link_config.active_speed == SPEED_1000 &&
  2624. ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
  2625. (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
  2626. udelay(120);
  2627. tw32_f(MAC_STATUS,
  2628. (MAC_STATUS_SYNC_CHANGED |
  2629. MAC_STATUS_CFG_CHANGED));
  2630. udelay(40);
  2631. tg3_write_mem(tp,
  2632. NIC_SRAM_FIRMWARE_MBOX,
  2633. NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
  2634. }
  2635. /* Prevent send BD corruption. */
  2636. if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
  2637. u16 oldlnkctl, newlnkctl;
  2638. pci_read_config_word(tp->pdev,
  2639. tp->pcie_cap + PCI_EXP_LNKCTL,
  2640. &oldlnkctl);
  2641. if (tp->link_config.active_speed == SPEED_100 ||
  2642. tp->link_config.active_speed == SPEED_10)
  2643. newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
  2644. else
  2645. newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
  2646. if (newlnkctl != oldlnkctl)
  2647. pci_write_config_word(tp->pdev,
  2648. tp->pcie_cap + PCI_EXP_LNKCTL,
  2649. newlnkctl);
  2650. }
  2651. if (current_link_up != netif_carrier_ok(tp->dev)) {
  2652. if (current_link_up)
  2653. netif_carrier_on(tp->dev);
  2654. else
  2655. netif_carrier_off(tp->dev);
  2656. tg3_link_report(tp);
  2657. }
  2658. return 0;
  2659. }
  2660. struct tg3_fiber_aneginfo {
  2661. int state;
  2662. #define ANEG_STATE_UNKNOWN 0
  2663. #define ANEG_STATE_AN_ENABLE 1
  2664. #define ANEG_STATE_RESTART_INIT 2
  2665. #define ANEG_STATE_RESTART 3
  2666. #define ANEG_STATE_DISABLE_LINK_OK 4
  2667. #define ANEG_STATE_ABILITY_DETECT_INIT 5
  2668. #define ANEG_STATE_ABILITY_DETECT 6
  2669. #define ANEG_STATE_ACK_DETECT_INIT 7
  2670. #define ANEG_STATE_ACK_DETECT 8
  2671. #define ANEG_STATE_COMPLETE_ACK_INIT 9
  2672. #define ANEG_STATE_COMPLETE_ACK 10
  2673. #define ANEG_STATE_IDLE_DETECT_INIT 11
  2674. #define ANEG_STATE_IDLE_DETECT 12
  2675. #define ANEG_STATE_LINK_OK 13
  2676. #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
  2677. #define ANEG_STATE_NEXT_PAGE_WAIT 15
  2678. u32 flags;
  2679. #define MR_AN_ENABLE 0x00000001
  2680. #define MR_RESTART_AN 0x00000002
  2681. #define MR_AN_COMPLETE 0x00000004
  2682. #define MR_PAGE_RX 0x00000008
  2683. #define MR_NP_LOADED 0x00000010
  2684. #define MR_TOGGLE_TX 0x00000020
  2685. #define MR_LP_ADV_FULL_DUPLEX 0x00000040
  2686. #define MR_LP_ADV_HALF_DUPLEX 0x00000080
  2687. #define MR_LP_ADV_SYM_PAUSE 0x00000100
  2688. #define MR_LP_ADV_ASYM_PAUSE 0x00000200
  2689. #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
  2690. #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
  2691. #define MR_LP_ADV_NEXT_PAGE 0x00001000
  2692. #define MR_TOGGLE_RX 0x00002000
  2693. #define MR_NP_RX 0x00004000
  2694. #define MR_LINK_OK 0x80000000
  2695. unsigned long link_time, cur_time;
  2696. u32 ability_match_cfg;
  2697. int ability_match_count;
  2698. char ability_match, idle_match, ack_match;
  2699. u32 txconfig, rxconfig;
  2700. #define ANEG_CFG_NP 0x00000080
  2701. #define ANEG_CFG_ACK 0x00000040
  2702. #define ANEG_CFG_RF2 0x00000020
  2703. #define ANEG_CFG_RF1 0x00000010
  2704. #define ANEG_CFG_PS2 0x00000001
  2705. #define ANEG_CFG_PS1 0x00008000
  2706. #define ANEG_CFG_HD 0x00004000
  2707. #define ANEG_CFG_FD 0x00002000
  2708. #define ANEG_CFG_INVAL 0x00001f06
  2709. };
  2710. #define ANEG_OK 0
  2711. #define ANEG_DONE 1
  2712. #define ANEG_TIMER_ENAB 2
  2713. #define ANEG_FAILED -1
  2714. #define ANEG_STATE_SETTLE_TIME 10000
  2715. static int tg3_fiber_aneg_smachine(struct tg3 *tp,
  2716. struct tg3_fiber_aneginfo *ap)
  2717. {
  2718. u16 flowctrl;
  2719. unsigned long delta;
  2720. u32 rx_cfg_reg;
  2721. int ret;
  2722. if (ap->state == ANEG_STATE_UNKNOWN) {
  2723. ap->rxconfig = 0;
  2724. ap->link_time = 0;
  2725. ap->cur_time = 0;
  2726. ap->ability_match_cfg = 0;
  2727. ap->ability_match_count = 0;
  2728. ap->ability_match = 0;
  2729. ap->idle_match = 0;
  2730. ap->ack_match = 0;
  2731. }
  2732. ap->cur_time++;
  2733. if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
  2734. rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
  2735. if (rx_cfg_reg != ap->ability_match_cfg) {
  2736. ap->ability_match_cfg = rx_cfg_reg;
  2737. ap->ability_match = 0;
  2738. ap->ability_match_count = 0;
  2739. } else {
  2740. if (++ap->ability_match_count > 1) {
  2741. ap->ability_match = 1;
  2742. ap->ability_match_cfg = rx_cfg_reg;
  2743. }
  2744. }
  2745. if (rx_cfg_reg & ANEG_CFG_ACK)
  2746. ap->ack_match = 1;
  2747. else
  2748. ap->ack_match = 0;
  2749. ap->idle_match = 0;
  2750. } else {
  2751. ap->idle_match = 1;
  2752. ap->ability_match_cfg = 0;
  2753. ap->ability_match_count = 0;
  2754. ap->ability_match = 0;
  2755. ap->ack_match = 0;
  2756. rx_cfg_reg = 0;
  2757. }
  2758. ap->rxconfig = rx_cfg_reg;
  2759. ret = ANEG_OK;
  2760. switch(ap->state) {
  2761. case ANEG_STATE_UNKNOWN:
  2762. if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
  2763. ap->state = ANEG_STATE_AN_ENABLE;
  2764. /* fallthru */
  2765. case ANEG_STATE_AN_ENABLE:
  2766. ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
  2767. if (ap->flags & MR_AN_ENABLE) {
  2768. ap->link_time = 0;
  2769. ap->cur_time = 0;
  2770. ap->ability_match_cfg = 0;
  2771. ap->ability_match_count = 0;
  2772. ap->ability_match = 0;
  2773. ap->idle_match = 0;
  2774. ap->ack_match = 0;
  2775. ap->state = ANEG_STATE_RESTART_INIT;
  2776. } else {
  2777. ap->state = ANEG_STATE_DISABLE_LINK_OK;
  2778. }
  2779. break;
  2780. case ANEG_STATE_RESTART_INIT:
  2781. ap->link_time = ap->cur_time;
  2782. ap->flags &= ~(MR_NP_LOADED);
  2783. ap->txconfig = 0;
  2784. tw32(MAC_TX_AUTO_NEG, 0);
  2785. tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
  2786. tw32_f(MAC_MODE, tp->mac_mode);
  2787. udelay(40);
  2788. ret = ANEG_TIMER_ENAB;
  2789. ap->state = ANEG_STATE_RESTART;
  2790. /* fallthru */
  2791. case ANEG_STATE_RESTART:
  2792. delta = ap->cur_time - ap->link_time;
  2793. if (delta > ANEG_STATE_SETTLE_TIME) {
  2794. ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
  2795. } else {
  2796. ret = ANEG_TIMER_ENAB;
  2797. }
  2798. break;
  2799. case ANEG_STATE_DISABLE_LINK_OK:
  2800. ret = ANEG_DONE;
  2801. break;
  2802. case ANEG_STATE_ABILITY_DETECT_INIT:
  2803. ap->flags &= ~(MR_TOGGLE_TX);
  2804. ap->txconfig = ANEG_CFG_FD;
  2805. flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
  2806. if (flowctrl & ADVERTISE_1000XPAUSE)
  2807. ap->txconfig |= ANEG_CFG_PS1;
  2808. if (flowctrl & ADVERTISE_1000XPSE_ASYM)
  2809. ap->txconfig |= ANEG_CFG_PS2;
  2810. tw32(MAC_TX_AUTO_NEG, ap->txconfig);
  2811. tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
  2812. tw32_f(MAC_MODE, tp->mac_mode);
  2813. udelay(40);
  2814. ap->state = ANEG_STATE_ABILITY_DETECT;
  2815. break;
  2816. case ANEG_STATE_ABILITY_DETECT:
  2817. if (ap->ability_match != 0 && ap->rxconfig != 0) {
  2818. ap->state = ANEG_STATE_ACK_DETECT_INIT;
  2819. }
  2820. break;
  2821. case ANEG_STATE_ACK_DETECT_INIT:
  2822. ap->txconfig |= ANEG_CFG_ACK;
  2823. tw32(MAC_TX_AUTO_NEG, ap->txconfig);
  2824. tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
  2825. tw32_f(MAC_MODE, tp->mac_mode);
  2826. udelay(40);
  2827. ap->state = ANEG_STATE_ACK_DETECT;
  2828. /* fallthru */
  2829. case ANEG_STATE_ACK_DETECT:
  2830. if (ap->ack_match != 0) {
  2831. if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
  2832. (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
  2833. ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
  2834. } else {
  2835. ap->state = ANEG_STATE_AN_ENABLE;
  2836. }
  2837. } else if (ap->ability_match != 0 &&
  2838. ap->rxconfig == 0) {
  2839. ap->state = ANEG_STATE_AN_ENABLE;
  2840. }
  2841. break;
  2842. case ANEG_STATE_COMPLETE_ACK_INIT:
  2843. if (ap->rxconfig & ANEG_CFG_INVAL) {
  2844. ret = ANEG_FAILED;
  2845. break;
  2846. }
  2847. ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
  2848. MR_LP_ADV_HALF_DUPLEX |
  2849. MR_LP_ADV_SYM_PAUSE |
  2850. MR_LP_ADV_ASYM_PAUSE |
  2851. MR_LP_ADV_REMOTE_FAULT1 |
  2852. MR_LP_ADV_REMOTE_FAULT2 |
  2853. MR_LP_ADV_NEXT_PAGE |
  2854. MR_TOGGLE_RX |
  2855. MR_NP_RX);
  2856. if (ap->rxconfig & ANEG_CFG_FD)
  2857. ap->flags |= MR_LP_ADV_FULL_DUPLEX;
  2858. if (ap->rxconfig & ANEG_CFG_HD)
  2859. ap->flags |= MR_LP_ADV_HALF_DUPLEX;
  2860. if (ap->rxconfig & ANEG_CFG_PS1)
  2861. ap->flags |= MR_LP_ADV_SYM_PAUSE;
  2862. if (ap->rxconfig & ANEG_CFG_PS2)
  2863. ap->flags |= MR_LP_ADV_ASYM_PAUSE;
  2864. if (ap->rxconfig & ANEG_CFG_RF1)
  2865. ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
  2866. if (ap->rxconfig & ANEG_CFG_RF2)
  2867. ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
  2868. if (ap->rxconfig & ANEG_CFG_NP)
  2869. ap->flags |= MR_LP_ADV_NEXT_PAGE;
  2870. ap->link_time = ap->cur_time;
  2871. ap->flags ^= (MR_TOGGLE_TX);
  2872. if (ap->rxconfig & 0x0008)
  2873. ap->flags |= MR_TOGGLE_RX;
  2874. if (ap->rxconfig & ANEG_CFG_NP)
  2875. ap->flags |= MR_NP_RX;
  2876. ap->flags |= MR_PAGE_RX;
  2877. ap->state = ANEG_STATE_COMPLETE_ACK;
  2878. ret = ANEG_TIMER_ENAB;
  2879. break;
  2880. case ANEG_STATE_COMPLETE_ACK:
  2881. if (ap->ability_match != 0 &&
  2882. ap->rxconfig == 0) {
  2883. ap->state = ANEG_STATE_AN_ENABLE;
  2884. break;
  2885. }
  2886. delta = ap->cur_time - ap->link_time;
  2887. if (delta > ANEG_STATE_SETTLE_TIME) {
  2888. if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
  2889. ap->state = ANEG_STATE_IDLE_DETECT_INIT;
  2890. } else {
  2891. if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
  2892. !(ap->flags & MR_NP_RX)) {
  2893. ap->state = ANEG_STATE_IDLE_DETECT_INIT;
  2894. } else {
  2895. ret = ANEG_FAILED;
  2896. }
  2897. }
  2898. }
  2899. break;
  2900. case ANEG_STATE_IDLE_DETECT_INIT:
  2901. ap->link_time = ap->cur_time;
  2902. tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
  2903. tw32_f(MAC_MODE, tp->mac_mode);
  2904. udelay(40);
  2905. ap->state = ANEG_STATE_IDLE_DETECT;
  2906. ret = ANEG_TIMER_ENAB;
  2907. break;
  2908. case ANEG_STATE_IDLE_DETECT:
  2909. if (ap->ability_match != 0 &&
  2910. ap->rxconfig == 0) {
  2911. ap->state = ANEG_STATE_AN_ENABLE;
  2912. break;
  2913. }
  2914. delta = ap->cur_time - ap->link_time;
  2915. if (delta > ANEG_STATE_SETTLE_TIME) {
  2916. /* XXX another gem from the Broadcom driver :( */
  2917. ap->state = ANEG_STATE_LINK_OK;
  2918. }
  2919. break;
  2920. case ANEG_STATE_LINK_OK:
  2921. ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
  2922. ret = ANEG_DONE;
  2923. break;
  2924. case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
  2925. /* ??? unimplemented */
  2926. break;
  2927. case ANEG_STATE_NEXT_PAGE_WAIT:
  2928. /* ??? unimplemented */
  2929. break;
  2930. default:
  2931. ret = ANEG_FAILED;
  2932. break;
  2933. }
  2934. return ret;
  2935. }
  2936. static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
  2937. {
  2938. int res = 0;
  2939. struct tg3_fiber_aneginfo aninfo;
  2940. int status = ANEG_FAILED;
  2941. unsigned int tick;
  2942. u32 tmp;
  2943. tw32_f(MAC_TX_AUTO_NEG, 0);
  2944. tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
  2945. tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
  2946. udelay(40);
  2947. tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
  2948. udelay(40);
  2949. memset(&aninfo, 0, sizeof(aninfo));
  2950. aninfo.flags |= MR_AN_ENABLE;
  2951. aninfo.state = ANEG_STATE_UNKNOWN;
  2952. aninfo.cur_time = 0;
  2953. tick = 0;
  2954. while (++tick < 195000) {
  2955. status = tg3_fiber_aneg_smachine(tp, &aninfo);
  2956. if (status == ANEG_DONE || status == ANEG_FAILED)
  2957. break;
  2958. udelay(1);
  2959. }
  2960. tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
  2961. tw32_f(MAC_MODE, tp->mac_mode);
  2962. udelay(40);
  2963. *txflags = aninfo.txconfig;
  2964. *rxflags = aninfo.flags;
  2965. if (status == ANEG_DONE &&
  2966. (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
  2967. MR_LP_ADV_FULL_DUPLEX)))
  2968. res = 1;
  2969. return res;
  2970. }
  2971. static void tg3_init_bcm8002(struct tg3 *tp)
  2972. {
  2973. u32 mac_status = tr32(MAC_STATUS);
  2974. int i;
  2975. /* Reset when initting first time or we have a link. */
  2976. if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
  2977. !(mac_status & MAC_STATUS_PCS_SYNCED))
  2978. return;
  2979. /* Set PLL lock range. */
  2980. tg3_writephy(tp, 0x16, 0x8007);
  2981. /* SW reset */
  2982. tg3_writephy(tp, MII_BMCR, BMCR_RESET);
  2983. /* Wait for reset to complete. */
  2984. /* XXX schedule_timeout() ... */
  2985. for (i = 0; i < 500; i++)
  2986. udelay(10);
  2987. /* Config mode; select PMA/Ch 1 regs. */
  2988. tg3_writephy(tp, 0x10, 0x8411);
  2989. /* Enable auto-lock and comdet, select txclk for tx. */
  2990. tg3_writephy(tp, 0x11, 0x0a10);
  2991. tg3_writephy(tp, 0x18, 0x00a0);
  2992. tg3_writephy(tp, 0x16, 0x41ff);
  2993. /* Assert and deassert POR. */
  2994. tg3_writephy(tp, 0x13, 0x0400);
  2995. udelay(40);
  2996. tg3_writephy(tp, 0x13, 0x0000);
  2997. tg3_writephy(tp, 0x11, 0x0a50);
  2998. udelay(40);
  2999. tg3_writephy(tp, 0x11, 0x0a10);
  3000. /* Wait for signal to stabilize */
  3001. /* XXX schedule_timeout() ... */
  3002. for (i = 0; i < 15000; i++)
  3003. udelay(10);
  3004. /* Deselect the channel register so we can read the PHYID
  3005. * later.
  3006. */
  3007. tg3_writephy(tp, 0x10, 0x8011);
  3008. }
  3009. static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
  3010. {
  3011. u16 flowctrl;
  3012. u32 sg_dig_ctrl, sg_dig_status;
  3013. u32 serdes_cfg, expected_sg_dig_ctrl;
  3014. int workaround, port_a;
  3015. int current_link_up;
  3016. serdes_cfg = 0;
  3017. expected_sg_dig_ctrl = 0;
  3018. workaround = 0;
  3019. port_a = 1;
  3020. current_link_up = 0;
  3021. if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
  3022. tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
  3023. workaround = 1;
  3024. if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
  3025. port_a = 0;
  3026. /* preserve bits 0-11,13,14 for signal pre-emphasis */
  3027. /* preserve bits 20-23 for voltage regulator */
  3028. serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
  3029. }
  3030. sg_dig_ctrl = tr32(SG_DIG_CTRL);
  3031. if (tp->link_config.autoneg != AUTONEG_ENABLE) {
  3032. if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
  3033. if (workaround) {
  3034. u32 val = serdes_cfg;
  3035. if (port_a)
  3036. val |= 0xc010000;
  3037. else
  3038. val |= 0x4010000;
  3039. tw32_f(MAC_SERDES_CFG, val);
  3040. }
  3041. tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
  3042. }
  3043. if (mac_status & MAC_STATUS_PCS_SYNCED) {
  3044. tg3_setup_flow_control(tp, 0, 0);
  3045. current_link_up = 1;
  3046. }
  3047. goto out;
  3048. }
  3049. /* Want auto-negotiation. */
  3050. expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
  3051. flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
  3052. if (flowctrl & ADVERTISE_1000XPAUSE)
  3053. expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
  3054. if (flowctrl & ADVERTISE_1000XPSE_ASYM)
  3055. expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
  3056. if (sg_dig_ctrl != expected_sg_dig_ctrl) {
  3057. if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
  3058. tp->serdes_counter &&
  3059. ((mac_status & (MAC_STATUS_PCS_SYNCED |
  3060. MAC_STATUS_RCVD_CFG)) ==
  3061. MAC_STATUS_PCS_SYNCED)) {
  3062. tp->serdes_counter--;
  3063. current_link_up = 1;
  3064. goto out;
  3065. }
  3066. restart_autoneg:
  3067. if (workaround)
  3068. tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
  3069. tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
  3070. udelay(5);
  3071. tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
  3072. tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
  3073. tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
  3074. } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
  3075. MAC_STATUS_SIGNAL_DET)) {
  3076. sg_dig_status = tr32(SG_DIG_STATUS);
  3077. mac_status = tr32(MAC_STATUS);
  3078. if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
  3079. (mac_status & MAC_STATUS_PCS_SYNCED)) {
  3080. u32 local_adv = 0, remote_adv = 0;
  3081. if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
  3082. local_adv |= ADVERTISE_1000XPAUSE;
  3083. if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
  3084. local_adv |= ADVERTISE_1000XPSE_ASYM;
  3085. if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
  3086. remote_adv |= LPA_1000XPAUSE;
  3087. if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
  3088. remote_adv |= LPA_1000XPAUSE_ASYM;
  3089. tg3_setup_flow_control(tp, local_adv, remote_adv);
  3090. current_link_up = 1;
  3091. tp->serdes_counter = 0;
  3092. tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
  3093. } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
  3094. if (tp->serdes_counter)
  3095. tp->serdes_counter--;
  3096. else {
  3097. if (workaround) {
  3098. u32 val = serdes_cfg;
  3099. if (port_a)
  3100. val |= 0xc010000;
  3101. else
  3102. val |= 0x4010000;
  3103. tw32_f(MAC_SERDES_CFG, val);
  3104. }
  3105. tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
  3106. udelay(40);
3107. /* Link parallel detection - link is up only
3108. * if we have PCS_SYNC and are not receiving
3109. * config code words. */
  3110. mac_status = tr32(MAC_STATUS);
  3111. if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
  3112. !(mac_status & MAC_STATUS_RCVD_CFG)) {
  3113. tg3_setup_flow_control(tp, 0, 0);
  3114. current_link_up = 1;
  3115. tp->tg3_flags2 |=
  3116. TG3_FLG2_PARALLEL_DETECT;
  3117. tp->serdes_counter =
  3118. SERDES_PARALLEL_DET_TIMEOUT;
  3119. } else
  3120. goto restart_autoneg;
  3121. }
  3122. }
  3123. } else {
  3124. tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
  3125. tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
  3126. }
  3127. out:
  3128. return current_link_up;
  3129. }
  3130. static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
  3131. {
  3132. int current_link_up = 0;
  3133. if (!(mac_status & MAC_STATUS_PCS_SYNCED))
  3134. goto out;
  3135. if (tp->link_config.autoneg == AUTONEG_ENABLE) {
  3136. u32 txflags, rxflags;
  3137. int i;
  3138. if (fiber_autoneg(tp, &txflags, &rxflags)) {
  3139. u32 local_adv = 0, remote_adv = 0;
  3140. if (txflags & ANEG_CFG_PS1)
  3141. local_adv |= ADVERTISE_1000XPAUSE;
  3142. if (txflags & ANEG_CFG_PS2)
  3143. local_adv |= ADVERTISE_1000XPSE_ASYM;
  3144. if (rxflags & MR_LP_ADV_SYM_PAUSE)
  3145. remote_adv |= LPA_1000XPAUSE;
  3146. if (rxflags & MR_LP_ADV_ASYM_PAUSE)
  3147. remote_adv |= LPA_1000XPAUSE_ASYM;
  3148. tg3_setup_flow_control(tp, local_adv, remote_adv);
  3149. current_link_up = 1;
  3150. }
  3151. for (i = 0; i < 30; i++) {
  3152. udelay(20);
  3153. tw32_f(MAC_STATUS,
  3154. (MAC_STATUS_SYNC_CHANGED |
  3155. MAC_STATUS_CFG_CHANGED));
  3156. udelay(40);
  3157. if ((tr32(MAC_STATUS) &
  3158. (MAC_STATUS_SYNC_CHANGED |
  3159. MAC_STATUS_CFG_CHANGED)) == 0)
  3160. break;
  3161. }
  3162. mac_status = tr32(MAC_STATUS);
  3163. if (current_link_up == 0 &&
  3164. (mac_status & MAC_STATUS_PCS_SYNCED) &&
  3165. !(mac_status & MAC_STATUS_RCVD_CFG))
  3166. current_link_up = 1;
  3167. } else {
  3168. tg3_setup_flow_control(tp, 0, 0);
  3169. /* Forcing 1000FD link up. */
  3170. current_link_up = 1;
  3171. tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
  3172. udelay(40);
  3173. tw32_f(MAC_MODE, tp->mac_mode);
  3174. udelay(40);
  3175. }
  3176. out:
  3177. return current_link_up;
  3178. }
  3179. static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
  3180. {
  3181. u32 orig_pause_cfg;
  3182. u16 orig_active_speed;
  3183. u8 orig_active_duplex;
  3184. u32 mac_status;
  3185. int current_link_up;
  3186. int i;
  3187. orig_pause_cfg = tp->link_config.active_flowctrl;
  3188. orig_active_speed = tp->link_config.active_speed;
  3189. orig_active_duplex = tp->link_config.active_duplex;
  3190. if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
  3191. netif_carrier_ok(tp->dev) &&
  3192. (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
  3193. mac_status = tr32(MAC_STATUS);
  3194. mac_status &= (MAC_STATUS_PCS_SYNCED |
  3195. MAC_STATUS_SIGNAL_DET |
  3196. MAC_STATUS_CFG_CHANGED |
  3197. MAC_STATUS_RCVD_CFG);
  3198. if (mac_status == (MAC_STATUS_PCS_SYNCED |
  3199. MAC_STATUS_SIGNAL_DET)) {
  3200. tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
  3201. MAC_STATUS_CFG_CHANGED));
  3202. return 0;
  3203. }
  3204. }
  3205. tw32_f(MAC_TX_AUTO_NEG, 0);
  3206. tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
  3207. tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
  3208. tw32_f(MAC_MODE, tp->mac_mode);
  3209. udelay(40);
  3210. if (tp->phy_id == PHY_ID_BCM8002)
  3211. tg3_init_bcm8002(tp);
  3212. /* Enable link change event even when serdes polling. */
  3213. tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
  3214. udelay(40);
  3215. current_link_up = 0;
  3216. mac_status = tr32(MAC_STATUS);
  3217. if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
  3218. current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
  3219. else
  3220. current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
  3221. tp->hw_status->status =
  3222. (SD_STATUS_UPDATED |
  3223. (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
  3224. for (i = 0; i < 100; i++) {
  3225. tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
  3226. MAC_STATUS_CFG_CHANGED));
  3227. udelay(5);
  3228. if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
  3229. MAC_STATUS_CFG_CHANGED |
  3230. MAC_STATUS_LNKSTATE_CHANGED)) == 0)
  3231. break;
  3232. }
  3233. mac_status = tr32(MAC_STATUS);
  3234. if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
  3235. current_link_up = 0;
  3236. if (tp->link_config.autoneg == AUTONEG_ENABLE &&
  3237. tp->serdes_counter == 0) {
  3238. tw32_f(MAC_MODE, (tp->mac_mode |
  3239. MAC_MODE_SEND_CONFIGS));
  3240. udelay(1);
  3241. tw32_f(MAC_MODE, tp->mac_mode);
  3242. }
  3243. }
  3244. if (current_link_up == 1) {
  3245. tp->link_config.active_speed = SPEED_1000;
  3246. tp->link_config.active_duplex = DUPLEX_FULL;
  3247. tw32(MAC_LED_CTRL, (tp->led_ctrl |
  3248. LED_CTRL_LNKLED_OVERRIDE |
  3249. LED_CTRL_1000MBPS_ON));
  3250. } else {
  3251. tp->link_config.active_speed = SPEED_INVALID;
  3252. tp->link_config.active_duplex = DUPLEX_INVALID;
  3253. tw32(MAC_LED_CTRL, (tp->led_ctrl |
  3254. LED_CTRL_LNKLED_OVERRIDE |
  3255. LED_CTRL_TRAFFIC_OVERRIDE));
  3256. }
  3257. if (current_link_up != netif_carrier_ok(tp->dev)) {
  3258. if (current_link_up)
  3259. netif_carrier_on(tp->dev);
  3260. else
  3261. netif_carrier_off(tp->dev);
  3262. tg3_link_report(tp);
  3263. } else {
  3264. u32 now_pause_cfg = tp->link_config.active_flowctrl;
  3265. if (orig_pause_cfg != now_pause_cfg ||
  3266. orig_active_speed != tp->link_config.active_speed ||
  3267. orig_active_duplex != tp->link_config.active_duplex)
  3268. tg3_link_report(tp);
  3269. }
  3270. return 0;
  3271. }
  3272. static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
  3273. {
  3274. int current_link_up, err = 0;
  3275. u32 bmsr, bmcr;
  3276. u16 current_speed;
  3277. u8 current_duplex;
  3278. u32 local_adv, remote_adv;
  3279. tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
  3280. tw32_f(MAC_MODE, tp->mac_mode);
  3281. udelay(40);
  3282. tw32(MAC_EVENT, 0);
  3283. tw32_f(MAC_STATUS,
  3284. (MAC_STATUS_SYNC_CHANGED |
  3285. MAC_STATUS_CFG_CHANGED |
  3286. MAC_STATUS_MI_COMPLETION |
  3287. MAC_STATUS_LNKSTATE_CHANGED));
  3288. udelay(40);
  3289. if (force_reset)
  3290. tg3_phy_reset(tp);
  3291. current_link_up = 0;
  3292. current_speed = SPEED_INVALID;
  3293. current_duplex = DUPLEX_INVALID;
  3294. err |= tg3_readphy(tp, MII_BMSR, &bmsr);
  3295. err |= tg3_readphy(tp, MII_BMSR, &bmsr);
  3296. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
  3297. if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
  3298. bmsr |= BMSR_LSTATUS;
  3299. else
  3300. bmsr &= ~BMSR_LSTATUS;
  3301. }
  3302. err |= tg3_readphy(tp, MII_BMCR, &bmcr);
  3303. if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
  3304. (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
  3305. /* do nothing, just check for link up at the end */
  3306. } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
  3307. u32 adv, new_adv;
  3308. err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
  3309. new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
  3310. ADVERTISE_1000XPAUSE |
  3311. ADVERTISE_1000XPSE_ASYM |
  3312. ADVERTISE_SLCT);
  3313. new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
  3314. if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
  3315. new_adv |= ADVERTISE_1000XHALF;
  3316. if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
  3317. new_adv |= ADVERTISE_1000XFULL;
  3318. if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
  3319. tg3_writephy(tp, MII_ADVERTISE, new_adv);
  3320. bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
  3321. tg3_writephy(tp, MII_BMCR, bmcr);
  3322. tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
  3323. tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
  3324. tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
  3325. return err;
  3326. }
  3327. } else {
  3328. u32 new_bmcr;
  3329. bmcr &= ~BMCR_SPEED1000;
  3330. new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
  3331. if (tp->link_config.duplex == DUPLEX_FULL)
  3332. new_bmcr |= BMCR_FULLDPLX;
  3333. if (new_bmcr != bmcr) {
  3334. /* BMCR_SPEED1000 is a reserved bit that needs
  3335. * to be set on write.
  3336. */
  3337. new_bmcr |= BMCR_SPEED1000;
  3338. /* Force a linkdown */
  3339. if (netif_carrier_ok(tp->dev)) {
  3340. u32 adv;
  3341. err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
  3342. adv &= ~(ADVERTISE_1000XFULL |
  3343. ADVERTISE_1000XHALF |
  3344. ADVERTISE_SLCT);
  3345. tg3_writephy(tp, MII_ADVERTISE, adv);
  3346. tg3_writephy(tp, MII_BMCR, bmcr |
  3347. BMCR_ANRESTART |
  3348. BMCR_ANENABLE);
  3349. udelay(10);
  3350. netif_carrier_off(tp->dev);
  3351. }
  3352. tg3_writephy(tp, MII_BMCR, new_bmcr);
  3353. bmcr = new_bmcr;
  3354. err |= tg3_readphy(tp, MII_BMSR, &bmsr);
  3355. err |= tg3_readphy(tp, MII_BMSR, &bmsr);
  3356. if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
  3357. ASIC_REV_5714) {
  3358. if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
  3359. bmsr |= BMSR_LSTATUS;
  3360. else
  3361. bmsr &= ~BMSR_LSTATUS;
  3362. }
  3363. tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
  3364. }
  3365. }
  3366. if (bmsr & BMSR_LSTATUS) {
  3367. current_speed = SPEED_1000;
  3368. current_link_up = 1;
  3369. if (bmcr & BMCR_FULLDPLX)
  3370. current_duplex = DUPLEX_FULL;
  3371. else
  3372. current_duplex = DUPLEX_HALF;
  3373. local_adv = 0;
  3374. remote_adv = 0;
  3375. if (bmcr & BMCR_ANENABLE) {
  3376. u32 common;
  3377. err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
  3378. err |= tg3_readphy(tp, MII_LPA, &remote_adv);
  3379. common = local_adv & remote_adv;
  3380. if (common & (ADVERTISE_1000XHALF |
  3381. ADVERTISE_1000XFULL)) {
  3382. if (common & ADVERTISE_1000XFULL)
  3383. current_duplex = DUPLEX_FULL;
  3384. else
  3385. current_duplex = DUPLEX_HALF;
  3386. }
  3387. else
  3388. current_link_up = 0;
  3389. }
  3390. }
  3391. if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
  3392. tg3_setup_flow_control(tp, local_adv, remote_adv);
  3393. tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
  3394. if (tp->link_config.active_duplex == DUPLEX_HALF)
  3395. tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
  3396. tw32_f(MAC_MODE, tp->mac_mode);
  3397. udelay(40);
  3398. tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
  3399. tp->link_config.active_speed = current_speed;
  3400. tp->link_config.active_duplex = current_duplex;
  3401. if (current_link_up != netif_carrier_ok(tp->dev)) {
  3402. if (current_link_up)
  3403. netif_carrier_on(tp->dev);
  3404. else {
  3405. netif_carrier_off(tp->dev);
  3406. tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
  3407. }
  3408. tg3_link_report(tp);
  3409. }
  3410. return err;
  3411. }
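/* Parallel detection for the MII-serdes case: once the autoneg grace
 * period (tp->serdes_counter) has expired and the link is still down,
 * a partner that shows signal detect but sends no config code words is
 * treated as a non-autonegotiating device, so the PHY is forced to
 * 1000/full and TG3_FLG2_PARALLEL_DETECT is set. If config code words
 * reappear later in that state, autoneg is turned back on.
 */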
  3412. static void tg3_serdes_parallel_detect(struct tg3 *tp)
  3413. {
  3414. if (tp->serdes_counter) {
  3415. /* Give autoneg time to complete. */
  3416. tp->serdes_counter--;
  3417. return;
  3418. }
  3419. if (!netif_carrier_ok(tp->dev) &&
  3420. (tp->link_config.autoneg == AUTONEG_ENABLE)) {
  3421. u32 bmcr;
  3422. tg3_readphy(tp, MII_BMCR, &bmcr);
  3423. if (bmcr & BMCR_ANENABLE) {
  3424. u32 phy1, phy2;
  3425. /* Select shadow register 0x1f */
  3426. tg3_writephy(tp, 0x1c, 0x7c00);
  3427. tg3_readphy(tp, 0x1c, &phy1);
  3428. /* Select expansion interrupt status register */
  3429. tg3_writephy(tp, 0x17, 0x0f01);
  3430. tg3_readphy(tp, 0x15, &phy2);
  3431. tg3_readphy(tp, 0x15, &phy2);
  3432. if ((phy1 & 0x10) && !(phy2 & 0x20)) {
  3433. /* We have signal detect and not receiving
  3434. * config code words, link is up by parallel
  3435. * detection.
  3436. */
  3437. bmcr &= ~BMCR_ANENABLE;
  3438. bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
  3439. tg3_writephy(tp, MII_BMCR, bmcr);
  3440. tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
  3441. }
  3442. }
  3443. }
  3444. else if (netif_carrier_ok(tp->dev) &&
  3445. (tp->link_config.autoneg == AUTONEG_ENABLE) &&
  3446. (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
  3447. u32 phy2;
  3448. /* Select expansion interrupt status register */
  3449. tg3_writephy(tp, 0x17, 0x0f01);
  3450. tg3_readphy(tp, 0x15, &phy2);
  3451. if (phy2 & 0x20) {
  3452. u32 bmcr;
  3453. /* Config code words received, turn on autoneg. */
  3454. tg3_readphy(tp, MII_BMCR, &bmcr);
  3455. tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
  3456. tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
  3457. }
  3458. }
  3459. }
  3460. static int tg3_setup_phy(struct tg3 *tp, int force_reset)
  3461. {
  3462. int err;
  3463. if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
  3464. err = tg3_setup_fiber_phy(tp, force_reset);
  3465. } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
  3466. err = tg3_setup_fiber_mii_phy(tp, force_reset);
  3467. } else {
  3468. err = tg3_setup_copper_phy(tp, force_reset);
  3469. }
  3470. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
  3471. u32 val, scale;
  3472. val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
  3473. if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
  3474. scale = 65;
  3475. else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
  3476. scale = 6;
  3477. else
  3478. scale = 12;
  3479. val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
  3480. val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
  3481. tw32(GRC_MISC_CFG, val);
  3482. }
  3483. if (tp->link_config.active_speed == SPEED_1000 &&
  3484. tp->link_config.active_duplex == DUPLEX_HALF)
  3485. tw32(MAC_TX_LENGTHS,
  3486. ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  3487. (6 << TX_LENGTHS_IPG_SHIFT) |
  3488. (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
  3489. else
  3490. tw32(MAC_TX_LENGTHS,
  3491. ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  3492. (6 << TX_LENGTHS_IPG_SHIFT) |
  3493. (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
  3494. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  3495. if (netif_carrier_ok(tp->dev)) {
  3496. tw32(HOSTCC_STAT_COAL_TICKS,
  3497. tp->coal.stats_block_coalesce_usecs);
  3498. } else {
  3499. tw32(HOSTCC_STAT_COAL_TICKS, 0);
  3500. }
  3501. }
  3502. if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
  3503. u32 val = tr32(PCIE_PWR_MGMT_THRESH);
  3504. if (!netif_carrier_ok(tp->dev))
  3505. val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
  3506. tp->pwrmgmt_thresh;
  3507. else
  3508. val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
  3509. tw32(PCIE_PWR_MGMT_THRESH, val);
  3510. }
  3511. return err;
  3512. }
  3513. /* This is called whenever we suspect that the system chipset is re-
  3514. * ordering the sequence of MMIO to the tx send mailbox. The symptom
  3515. * is bogus tx completions. We try to recover by setting the
  3516. * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
  3517. * in the workqueue.
  3518. */
  3519. static void tg3_tx_recover(struct tg3 *tp)
  3520. {
  3521. BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
  3522. tp->write32_tx_mbox == tg3_write_indirect_mbox);
  3523. printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
  3524. "mapped I/O cycles to the network device, attempting to "
  3525. "recover. Please report the problem to the driver maintainer "
  3526. "and include system chipset information.\n", tp->dev->name);
  3527. spin_lock(&tp->lock);
  3528. tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
  3529. spin_unlock(&tp->lock);
  3530. }
  3531. static inline u32 tg3_tx_avail(struct tg3 *tp)
  3532. {
  3533. smp_mb();
  3534. return (tp->tx_pending -
  3535. ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
  3536. }
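/* Example (informal): with tx_pending at, say, 511, tx_prod == 100 and
 * tx_cons == 20, 80 descriptors are still outstanding, so
 * tg3_tx_avail() returns 511 - 80 = 431 free slots. The smp_mb()
 * orders these reads against producer/consumer updates made elsewhere
 * (see the matching barrier and comment in tg3_tx()).
 */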
  3537. /* Tigon3 never reports partial packet sends. So we do not
  3538. * need special logic to handle SKBs that have not had all
  3539. * of their frags sent yet, like SunGEM does.
  3540. */
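/* tg3_tx() walks the completion ring from tp->tx_cons up to the
 * hardware consumer index, unmapping and freeing one skb (plus its
 * fragment slots) per packet. A missing skb, a fragment slot that
 * unexpectedly still holds an skb, or a hardware index that stops
 * mid-packet is treated as a bogus completion and handed to
 * tg3_tx_recover().
 */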
  3541. static void tg3_tx(struct tg3 *tp)
  3542. {
  3543. u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
  3544. u32 sw_idx = tp->tx_cons;
  3545. while (sw_idx != hw_idx) {
  3546. struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
  3547. struct sk_buff *skb = ri->skb;
  3548. int i, tx_bug = 0;
  3549. if (unlikely(skb == NULL)) {
  3550. tg3_tx_recover(tp);
  3551. return;
  3552. }
  3553. skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
  3554. ri->skb = NULL;
  3555. sw_idx = NEXT_TX(sw_idx);
  3556. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  3557. ri = &tp->tx_buffers[sw_idx];
  3558. if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
  3559. tx_bug = 1;
  3560. sw_idx = NEXT_TX(sw_idx);
  3561. }
  3562. dev_kfree_skb(skb);
  3563. if (unlikely(tx_bug)) {
  3564. tg3_tx_recover(tp);
  3565. return;
  3566. }
  3567. }
  3568. tp->tx_cons = sw_idx;
  3569. /* Need to make the tx_cons update visible to tg3_start_xmit()
  3570. * before checking for netif_queue_stopped(). Without the
  3571. * memory barrier, there is a small possibility that tg3_start_xmit()
  3572. * will miss it and cause the queue to be stopped forever.
  3573. */
  3574. smp_mb();
  3575. if (unlikely(netif_queue_stopped(tp->dev) &&
  3576. (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
  3577. netif_tx_lock(tp->dev);
  3578. if (netif_queue_stopped(tp->dev) &&
  3579. (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
  3580. netif_wake_queue(tp->dev);
  3581. netif_tx_unlock(tp->dev);
  3582. }
  3583. }
  3584. /* Returns size of skb allocated or < 0 on error.
  3585. *
  3586. * We only need to fill in the address because the other members
  3587. * of the RX descriptor are invariant, see tg3_init_rings.
  3588. *
3589. * Note the purposeful asymmetry of cpu vs. chip accesses. For
  3590. * posting buffers we only dirty the first cache line of the RX
  3591. * descriptor (containing the address). Whereas for the RX status
  3592. * buffers the cpu only reads the last cacheline of the RX descriptor
  3593. * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  3594. */
  3595. static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
  3596. int src_idx, u32 dest_idx_unmasked)
  3597. {
  3598. struct tg3_rx_buffer_desc *desc;
  3599. struct ring_info *map, *src_map;
  3600. struct sk_buff *skb;
  3601. dma_addr_t mapping;
  3602. int skb_size, dest_idx;
  3603. src_map = NULL;
  3604. switch (opaque_key) {
  3605. case RXD_OPAQUE_RING_STD:
  3606. dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
  3607. desc = &tp->rx_std[dest_idx];
  3608. map = &tp->rx_std_buffers[dest_idx];
  3609. if (src_idx >= 0)
  3610. src_map = &tp->rx_std_buffers[src_idx];
  3611. skb_size = tp->rx_pkt_buf_sz;
  3612. break;
  3613. case RXD_OPAQUE_RING_JUMBO:
  3614. dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
  3615. desc = &tp->rx_jumbo[dest_idx];
  3616. map = &tp->rx_jumbo_buffers[dest_idx];
  3617. if (src_idx >= 0)
  3618. src_map = &tp->rx_jumbo_buffers[src_idx];
  3619. skb_size = RX_JUMBO_PKT_BUF_SZ;
  3620. break;
  3621. default:
  3622. return -EINVAL;
  3623. }
  3624. /* Do not overwrite any of the map or rp information
  3625. * until we are sure we can commit to a new buffer.
  3626. *
  3627. * Callers depend upon this behavior and assume that
  3628. * we leave everything unchanged if we fail.
  3629. */
  3630. skb = netdev_alloc_skb(tp->dev, skb_size);
  3631. if (skb == NULL)
  3632. return -ENOMEM;
  3633. skb_reserve(skb, tp->rx_offset);
  3634. mapping = pci_map_single(tp->pdev, skb->data,
  3635. skb_size - tp->rx_offset,
  3636. PCI_DMA_FROMDEVICE);
  3637. map->skb = skb;
  3638. pci_unmap_addr_set(map, mapping, mapping);
  3639. if (src_map != NULL)
  3640. src_map->skb = NULL;
  3641. desc->addr_hi = ((u64)mapping >> 32);
  3642. desc->addr_lo = ((u64)mapping & 0xffffffff);
  3643. return skb_size;
  3644. }
  3645. /* We only need to move over in the address because the other
  3646. * members of the RX descriptor are invariant. See notes above
  3647. * tg3_alloc_rx_skb for full details.
  3648. */
  3649. static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
  3650. int src_idx, u32 dest_idx_unmasked)
  3651. {
  3652. struct tg3_rx_buffer_desc *src_desc, *dest_desc;
  3653. struct ring_info *src_map, *dest_map;
  3654. int dest_idx;
  3655. switch (opaque_key) {
  3656. case RXD_OPAQUE_RING_STD:
  3657. dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
  3658. dest_desc = &tp->rx_std[dest_idx];
  3659. dest_map = &tp->rx_std_buffers[dest_idx];
  3660. src_desc = &tp->rx_std[src_idx];
  3661. src_map = &tp->rx_std_buffers[src_idx];
  3662. break;
  3663. case RXD_OPAQUE_RING_JUMBO:
  3664. dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
  3665. dest_desc = &tp->rx_jumbo[dest_idx];
  3666. dest_map = &tp->rx_jumbo_buffers[dest_idx];
  3667. src_desc = &tp->rx_jumbo[src_idx];
  3668. src_map = &tp->rx_jumbo_buffers[src_idx];
  3669. break;
  3670. default:
  3671. return;
  3672. }
  3673. dest_map->skb = src_map->skb;
  3674. pci_unmap_addr_set(dest_map, mapping,
  3675. pci_unmap_addr(src_map, mapping));
  3676. dest_desc->addr_hi = src_desc->addr_hi;
  3677. dest_desc->addr_lo = src_desc->addr_lo;
  3678. src_map->skb = NULL;
  3679. }
  3680. #if TG3_VLAN_TAG_USED
  3681. static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
  3682. {
  3683. return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
  3684. }
  3685. #endif
  3686. /* The RX ring scheme is composed of multiple rings which post fresh
  3687. * buffers to the chip, and one special ring the chip uses to report
  3688. * status back to the host.
  3689. *
  3690. * The special ring reports the status of received packets to the
  3691. * host. The chip does not write into the original descriptor the
  3692. * RX buffer was obtained from. The chip simply takes the original
  3693. * descriptor as provided by the host, updates the status and length
  3694. * field, then writes this into the next status ring entry.
  3695. *
  3696. * Each ring the host uses to post buffers to the chip is described
3697. * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
  3698. * it is first placed into the on-chip ram. When the packet's length
  3699. * is known, it walks down the TG3_BDINFO entries to select the ring.
  3700. * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
  3701. * which is within the range of the new packet's length is chosen.
  3702. *
  3703. * The "separate ring for rx status" scheme may sound queer, but it makes
  3704. * sense from a cache coherency perspective. If only the host writes
  3705. * to the buffer post rings, and only the chip writes to the rx status
  3706. * rings, then cache lines never move beyond shared-modified state.
  3707. * If both the host and chip were to write into the same ring, cache line
  3708. * eviction could occur since both entities want it in an exclusive state.
  3709. */
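/* Informal example of the opaque cookie: a buffer posted to the
 * standard ring at index 5 carries RXD_OPAQUE_RING_STD plus the index
 * in its opaque field, so when the chip copies that descriptor into
 * the status ring, tg3_rx() can recover both the source ring and the
 * buffer slot from the status entry alone.
 */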
  3710. static int tg3_rx(struct tg3 *tp, int budget)
  3711. {
  3712. u32 work_mask, rx_std_posted = 0;
  3713. u32 sw_idx = tp->rx_rcb_ptr;
  3714. u16 hw_idx;
  3715. int received;
  3716. hw_idx = tp->hw_status->idx[0].rx_producer;
  3717. /*
  3718. * We need to order the read of hw_idx and the read of
  3719. * the opaque cookie.
  3720. */
  3721. rmb();
  3722. work_mask = 0;
  3723. received = 0;
  3724. while (sw_idx != hw_idx && budget > 0) {
  3725. struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
  3726. unsigned int len;
  3727. struct sk_buff *skb;
  3728. dma_addr_t dma_addr;
  3729. u32 opaque_key, desc_idx, *post_ptr;
  3730. desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
  3731. opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
  3732. if (opaque_key == RXD_OPAQUE_RING_STD) {
  3733. dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
  3734. mapping);
  3735. skb = tp->rx_std_buffers[desc_idx].skb;
  3736. post_ptr = &tp->rx_std_ptr;
  3737. rx_std_posted++;
  3738. } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
  3739. dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
  3740. mapping);
  3741. skb = tp->rx_jumbo_buffers[desc_idx].skb;
  3742. post_ptr = &tp->rx_jumbo_ptr;
  3743. }
  3744. else {
  3745. goto next_pkt_nopost;
  3746. }
  3747. work_mask |= opaque_key;
  3748. if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
  3749. (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
  3750. drop_it:
  3751. tg3_recycle_rx(tp, opaque_key,
  3752. desc_idx, *post_ptr);
  3753. drop_it_no_recycle:
  3754. /* Other statistics kept track of by card. */
  3755. tp->net_stats.rx_dropped++;
  3756. goto next_pkt;
  3757. }
  3758. len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
  3759. ETH_FCS_LEN;
  3760. if (len > RX_COPY_THRESHOLD
  3761. && tp->rx_offset == NET_IP_ALIGN
  3762. /* rx_offset will likely not equal NET_IP_ALIGN
  3763. * if this is a 5701 card running in PCI-X mode
  3764. * [see tg3_get_invariants()]
  3765. */
  3766. ) {
  3767. int skb_size;
  3768. skb_size = tg3_alloc_rx_skb(tp, opaque_key,
  3769. desc_idx, *post_ptr);
  3770. if (skb_size < 0)
  3771. goto drop_it;
  3772. pci_unmap_single(tp->pdev, dma_addr,
  3773. skb_size - tp->rx_offset,
  3774. PCI_DMA_FROMDEVICE);
  3775. skb_put(skb, len);
  3776. } else {
  3777. struct sk_buff *copy_skb;
  3778. tg3_recycle_rx(tp, opaque_key,
  3779. desc_idx, *post_ptr);
  3780. copy_skb = netdev_alloc_skb(tp->dev,
  3781. len + TG3_RAW_IP_ALIGN);
  3782. if (copy_skb == NULL)
  3783. goto drop_it_no_recycle;
  3784. skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
  3785. skb_put(copy_skb, len);
  3786. pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
  3787. skb_copy_from_linear_data(skb, copy_skb->data, len);
  3788. pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
  3789. /* We'll reuse the original ring buffer. */
  3790. skb = copy_skb;
  3791. }
  3792. if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
  3793. (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
  3794. (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
  3795. >> RXD_TCPCSUM_SHIFT) == 0xffff))
  3796. skb->ip_summed = CHECKSUM_UNNECESSARY;
  3797. else
  3798. skb->ip_summed = CHECKSUM_NONE;
  3799. skb->protocol = eth_type_trans(skb, tp->dev);
  3800. if (len > (tp->dev->mtu + ETH_HLEN) &&
  3801. skb->protocol != htons(ETH_P_8021Q)) {
  3802. dev_kfree_skb(skb);
  3803. goto next_pkt;
  3804. }
  3805. #if TG3_VLAN_TAG_USED
  3806. if (tp->vlgrp != NULL &&
  3807. desc->type_flags & RXD_FLAG_VLAN) {
  3808. tg3_vlan_rx(tp, skb,
  3809. desc->err_vlan & RXD_VLAN_MASK);
  3810. } else
  3811. #endif
  3812. netif_receive_skb(skb);
  3813. received++;
  3814. budget--;
  3815. next_pkt:
  3816. (*post_ptr)++;
  3817. if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
  3818. u32 idx = *post_ptr % TG3_RX_RING_SIZE;
  3819. tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
  3820. TG3_64BIT_REG_LOW, idx);
  3821. work_mask &= ~RXD_OPAQUE_RING_STD;
  3822. rx_std_posted = 0;
  3823. }
  3824. next_pkt_nopost:
  3825. sw_idx++;
  3826. sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
  3827. /* Refresh hw_idx to see if there is new work */
  3828. if (sw_idx == hw_idx) {
  3829. hw_idx = tp->hw_status->idx[0].rx_producer;
  3830. rmb();
  3831. }
  3832. }
  3833. /* ACK the status ring. */
  3834. tp->rx_rcb_ptr = sw_idx;
  3835. tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
  3836. /* Refill RX ring(s). */
  3837. if (work_mask & RXD_OPAQUE_RING_STD) {
  3838. sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
  3839. tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
  3840. sw_idx);
  3841. }
  3842. if (work_mask & RXD_OPAQUE_RING_JUMBO) {
  3843. sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
  3844. tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
  3845. sw_idx);
  3846. }
  3847. mmiowb();
  3848. return received;
  3849. }
  3850. static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
  3851. {
  3852. struct tg3_hw_status *sblk = tp->hw_status;
  3853. /* handle link change and other phy events */
  3854. if (!(tp->tg3_flags &
  3855. (TG3_FLAG_USE_LINKCHG_REG |
  3856. TG3_FLAG_POLL_SERDES))) {
  3857. if (sblk->status & SD_STATUS_LINK_CHG) {
  3858. sblk->status = SD_STATUS_UPDATED |
  3859. (sblk->status & ~SD_STATUS_LINK_CHG);
  3860. spin_lock(&tp->lock);
  3861. if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
  3862. tw32_f(MAC_STATUS,
  3863. (MAC_STATUS_SYNC_CHANGED |
  3864. MAC_STATUS_CFG_CHANGED |
  3865. MAC_STATUS_MI_COMPLETION |
  3866. MAC_STATUS_LNKSTATE_CHANGED));
  3867. udelay(40);
  3868. } else
  3869. tg3_setup_phy(tp, 0);
  3870. spin_unlock(&tp->lock);
  3871. }
  3872. }
  3873. /* run TX completion thread */
  3874. if (sblk->idx[0].tx_consumer != tp->tx_cons) {
  3875. tg3_tx(tp);
  3876. if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
  3877. return work_done;
  3878. }
  3879. /* run RX thread, within the bounds set by NAPI.
  3880. * All RX "locking" is done by ensuring outside
  3881. * code synchronizes with tg3->napi.poll()
  3882. */
  3883. if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
  3884. work_done += tg3_rx(tp, budget - work_done);
  3885. return work_done;
  3886. }
  3887. static int tg3_poll(struct napi_struct *napi, int budget)
  3888. {
  3889. struct tg3 *tp = container_of(napi, struct tg3, napi);
  3890. int work_done = 0;
  3891. struct tg3_hw_status *sblk = tp->hw_status;
  3892. while (1) {
  3893. work_done = tg3_poll_work(tp, work_done, budget);
  3894. if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
  3895. goto tx_recovery;
  3896. if (unlikely(work_done >= budget))
  3897. break;
  3898. if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
  3899. /* tp->last_tag is used in tg3_restart_ints() below
  3900. * to tell the hw how much work has been processed,
  3901. * so we must read it before checking for more work.
  3902. */
  3903. tp->last_tag = sblk->status_tag;
  3904. rmb();
  3905. } else
  3906. sblk->status &= ~SD_STATUS_UPDATED;
  3907. if (likely(!tg3_has_work(tp))) {
  3908. napi_complete(napi);
  3909. tg3_restart_ints(tp);
  3910. break;
  3911. }
  3912. }
  3913. return work_done;
  3914. tx_recovery:
  3915. /* work_done is guaranteed to be less than budget. */
  3916. napi_complete(napi);
  3917. schedule_work(&tp->reset_task);
  3918. return work_done;
  3919. }
  3920. static void tg3_irq_quiesce(struct tg3 *tp)
  3921. {
  3922. BUG_ON(tp->irq_sync);
  3923. tp->irq_sync = 1;
  3924. smp_mb();
  3925. synchronize_irq(tp->pdev->irq);
  3926. }
  3927. static inline int tg3_irq_sync(struct tg3 *tp)
  3928. {
  3929. return tp->irq_sync;
  3930. }
3931. /* Fully shut down all tg3 driver activity elsewhere in the system.
3932. * If irq_sync is non-zero, the caller is also synchronized against
3933. * the IRQ handler. Most of the time this is only necessary when
3934. * shutting down the device.
  3935. */
  3936. static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
  3937. {
  3938. spin_lock_bh(&tp->lock);
  3939. if (irq_sync)
  3940. tg3_irq_quiesce(tp);
  3941. }
  3942. static inline void tg3_full_unlock(struct tg3 *tp)
  3943. {
  3944. spin_unlock_bh(&tp->lock);
  3945. }
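/* Typical usage of the pair above (as in tg3_change_mtu() further
 * down): tg3_full_lock(tp, 1) quiesces the IRQ handler before a chip
 * reset, the reconfiguration runs under tp->lock, and
 * tg3_full_unlock(tp) drops the lock again. Callers that do not need
 * the IRQ handler quiesced pass irq_sync == 0 and skip the
 * synchronize_irq().
 */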
  3946. /* One-shot MSI handler - Chip automatically disables interrupt
  3947. * after sending MSI so driver doesn't have to do it.
  3948. */
  3949. static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
  3950. {
  3951. struct net_device *dev = dev_id;
  3952. struct tg3 *tp = netdev_priv(dev);
  3953. prefetch(tp->hw_status);
  3954. prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
  3955. if (likely(!tg3_irq_sync(tp)))
  3956. napi_schedule(&tp->napi);
  3957. return IRQ_HANDLED;
  3958. }
  3959. /* MSI ISR - No need to check for interrupt sharing and no need to
  3960. * flush status block and interrupt mailbox. PCI ordering rules
  3961. * guarantee that MSI will arrive after the status block.
  3962. */
  3963. static irqreturn_t tg3_msi(int irq, void *dev_id)
  3964. {
  3965. struct net_device *dev = dev_id;
  3966. struct tg3 *tp = netdev_priv(dev);
  3967. prefetch(tp->hw_status);
  3968. prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
  3969. /*
  3970. * Writing any value to intr-mbox-0 clears PCI INTA# and
  3971. * chip-internal interrupt pending events.
3972. * Writing non-zero to intr-mbox-0 additionally tells the
  3973. * NIC to stop sending us irqs, engaging "in-intr-handler"
  3974. * event coalescing.
  3975. */
  3976. tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
  3977. if (likely(!tg3_irq_sync(tp)))
  3978. napi_schedule(&tp->napi);
  3979. return IRQ_RETVAL(1);
  3980. }
  3981. static irqreturn_t tg3_interrupt(int irq, void *dev_id)
  3982. {
  3983. struct net_device *dev = dev_id;
  3984. struct tg3 *tp = netdev_priv(dev);
  3985. struct tg3_hw_status *sblk = tp->hw_status;
  3986. unsigned int handled = 1;
3987. /* In INTx mode, it is possible for the interrupt to arrive at the CPU
3988. * before the status block write posted prior to the interrupt is visible.
  3989. * Reading the PCI State register will confirm whether the
  3990. * interrupt is ours and will flush the status block.
  3991. */
  3992. if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
  3993. if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
  3994. (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
  3995. handled = 0;
  3996. goto out;
  3997. }
  3998. }
  3999. /*
  4000. * Writing any value to intr-mbox-0 clears PCI INTA# and
  4001. * chip-internal interrupt pending events.
4002. * Writing non-zero to intr-mbox-0 additionally tells the
  4003. * NIC to stop sending us irqs, engaging "in-intr-handler"
  4004. * event coalescing.
  4005. *
  4006. * Flush the mailbox to de-assert the IRQ immediately to prevent
  4007. * spurious interrupts. The flush impacts performance but
  4008. * excessive spurious interrupts can be worse in some cases.
  4009. */
  4010. tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
  4011. if (tg3_irq_sync(tp))
  4012. goto out;
  4013. sblk->status &= ~SD_STATUS_UPDATED;
  4014. if (likely(tg3_has_work(tp))) {
  4015. prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
  4016. napi_schedule(&tp->napi);
  4017. } else {
  4018. /* No work, shared interrupt perhaps? re-enable
  4019. * interrupts, and flush that PCI write
  4020. */
  4021. tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
  4022. 0x00000000);
  4023. }
  4024. out:
  4025. return IRQ_RETVAL(handled);
  4026. }
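/* The handler below is the tagged-status variant of tg3_interrupt():
 * the "is this interrupt ours" test compares the status block tag
 * against tp->last_tag instead of checking SD_STATUS_UPDATED, and
 * last_tag is only advanced when the NAPI poll is not already
 * scheduled.
 */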
  4027. static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
  4028. {
  4029. struct net_device *dev = dev_id;
  4030. struct tg3 *tp = netdev_priv(dev);
  4031. struct tg3_hw_status *sblk = tp->hw_status;
  4032. unsigned int handled = 1;
4033. /* In INTx mode, it is possible for the interrupt to arrive at the CPU
4034. * before the status block write posted prior to the interrupt is visible.
  4035. * Reading the PCI State register will confirm whether the
  4036. * interrupt is ours and will flush the status block.
  4037. */
  4038. if (unlikely(sblk->status_tag == tp->last_tag)) {
  4039. if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
  4040. (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
  4041. handled = 0;
  4042. goto out;
  4043. }
  4044. }
  4045. /*
4046. * Writing any value to intr-mbox-0 clears PCI INTA# and
4047. * chip-internal interrupt pending events.
4048. * Writing non-zero to intr-mbox-0 additionally tells the
  4049. * NIC to stop sending us irqs, engaging "in-intr-handler"
  4050. * event coalescing.
  4051. *
  4052. * Flush the mailbox to de-assert the IRQ immediately to prevent
  4053. * spurious interrupts. The flush impacts performance but
  4054. * excessive spurious interrupts can be worse in some cases.
  4055. */
  4056. tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
  4057. if (tg3_irq_sync(tp))
  4058. goto out;
  4059. if (napi_schedule_prep(&tp->napi)) {
  4060. prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
  4061. /* Update last_tag to mark that this status has been
  4062. * seen. Because interrupt may be shared, we may be
  4063. * racing with tg3_poll(), so only update last_tag
  4064. * if tg3_poll() is not scheduled.
  4065. */
  4066. tp->last_tag = sblk->status_tag;
  4067. __napi_schedule(&tp->napi);
  4068. }
  4069. out:
  4070. return IRQ_RETVAL(handled);
  4071. }
  4072. /* ISR for interrupt test */
  4073. static irqreturn_t tg3_test_isr(int irq, void *dev_id)
  4074. {
  4075. struct net_device *dev = dev_id;
  4076. struct tg3 *tp = netdev_priv(dev);
  4077. struct tg3_hw_status *sblk = tp->hw_status;
  4078. if ((sblk->status & SD_STATUS_UPDATED) ||
  4079. !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
  4080. tg3_disable_ints(tp);
  4081. return IRQ_RETVAL(1);
  4082. }
  4083. return IRQ_RETVAL(0);
  4084. }
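/* tg3_test_isr() reports whether the status block was updated (or
 * INTA is still asserted) and disables further interrupts so that a
 * single test interrupt can be observed; presumably it is installed
 * temporarily in place of the normal handler while the interrupt
 * self-test runs.
 */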
  4085. static int tg3_init_hw(struct tg3 *, int);
  4086. static int tg3_halt(struct tg3 *, int, int);
  4087. /* Restart hardware after configuration changes, self-test, etc.
  4088. * Invoked with tp->lock held.
  4089. */
  4090. static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
  4091. __releases(tp->lock)
  4092. __acquires(tp->lock)
  4093. {
  4094. int err;
  4095. err = tg3_init_hw(tp, reset_phy);
  4096. if (err) {
  4097. printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
  4098. "aborting.\n", tp->dev->name);
  4099. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  4100. tg3_full_unlock(tp);
  4101. del_timer_sync(&tp->timer);
  4102. tp->irq_sync = 0;
  4103. napi_enable(&tp->napi);
  4104. dev_close(tp->dev);
  4105. tg3_full_lock(tp, 0);
  4106. }
  4107. return err;
  4108. }
  4109. #ifdef CONFIG_NET_POLL_CONTROLLER
  4110. static void tg3_poll_controller(struct net_device *dev)
  4111. {
  4112. struct tg3 *tp = netdev_priv(dev);
  4113. tg3_interrupt(tp->pdev->irq, dev);
  4114. }
  4115. #endif
  4116. static void tg3_reset_task(struct work_struct *work)
  4117. {
  4118. struct tg3 *tp = container_of(work, struct tg3, reset_task);
  4119. int err;
  4120. unsigned int restart_timer;
  4121. tg3_full_lock(tp, 0);
  4122. if (!netif_running(tp->dev)) {
  4123. tg3_full_unlock(tp);
  4124. return;
  4125. }
  4126. tg3_full_unlock(tp);
  4127. tg3_phy_stop(tp);
  4128. tg3_netif_stop(tp);
  4129. tg3_full_lock(tp, 1);
  4130. restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
  4131. tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
  4132. if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
  4133. tp->write32_tx_mbox = tg3_write32_tx_mbox;
  4134. tp->write32_rx_mbox = tg3_write_flush_reg32;
  4135. tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
  4136. tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
  4137. }
  4138. tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
  4139. err = tg3_init_hw(tp, 1);
  4140. if (err)
  4141. goto out;
  4142. tg3_netif_start(tp);
  4143. if (restart_timer)
  4144. mod_timer(&tp->timer, jiffies + 1);
  4145. out:
  4146. tg3_full_unlock(tp);
  4147. if (!err)
  4148. tg3_phy_start(tp);
  4149. }
  4150. static void tg3_dump_short_state(struct tg3 *tp)
  4151. {
  4152. printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
  4153. tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
  4154. printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
  4155. tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
  4156. }
  4157. static void tg3_tx_timeout(struct net_device *dev)
  4158. {
  4159. struct tg3 *tp = netdev_priv(dev);
  4160. if (netif_msg_tx_err(tp)) {
  4161. printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
  4162. dev->name);
  4163. tg3_dump_short_state(tp);
  4164. }
  4165. schedule_work(&tp->reset_task);
  4166. }
  4167. /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
  4168. static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
  4169. {
  4170. u32 base = (u32) mapping & 0xffffffff;
  4171. return ((base > 0xffffdcc0) &&
  4172. (base + len + 8 < base));
  4173. }
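/* Worked example (informal): base = 0xffffe000 with len = 0x3000 gives
 * base + len + 8 = 0x100001008, which truncates to 0x1008 in 32-bit
 * arithmetic and is therefore smaller than base, so the test reports
 * that the buffer would cross a 4GB boundary.
 */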
  4174. /* Test for DMA addresses > 40-bit */
  4175. static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
  4176. int len)
  4177. {
  4178. #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
  4179. if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
  4180. return (((u64) mapping + len) > DMA_40BIT_MASK);
  4181. return 0;
  4182. #else
  4183. return 0;
  4184. #endif
  4185. }
  4186. static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4187. /* Work around 4GB and 40-bit hardware DMA bugs. */
  4188. static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
  4189. u32 last_plus_one, u32 *start,
  4190. u32 base_flags, u32 mss)
  4191. {
  4192. struct sk_buff *new_skb;
  4193. dma_addr_t new_addr = 0;
  4194. u32 entry = *start;
  4195. int i, ret = 0;
  4196. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
  4197. new_skb = skb_copy(skb, GFP_ATOMIC);
  4198. else {
  4199. int more_headroom = 4 - ((unsigned long)skb->data & 3);
  4200. new_skb = skb_copy_expand(skb,
  4201. skb_headroom(skb) + more_headroom,
  4202. skb_tailroom(skb), GFP_ATOMIC);
  4203. }
  4204. if (!new_skb) {
  4205. ret = -1;
  4206. } else {
  4207. /* New SKB is guaranteed to be linear. */
  4208. entry = *start;
  4209. ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
  4210. new_addr = skb_shinfo(new_skb)->dma_maps[0];
  4211. /* Make sure new skb does not cross any 4G boundaries.
  4212. * Drop the packet if it does.
  4213. */
  4214. if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
  4215. if (!ret)
  4216. skb_dma_unmap(&tp->pdev->dev, new_skb,
  4217. DMA_TO_DEVICE);
  4218. ret = -1;
  4219. dev_kfree_skb(new_skb);
  4220. new_skb = NULL;
  4221. } else {
  4222. tg3_set_txd(tp, entry, new_addr, new_skb->len,
  4223. base_flags, 1 | (mss << 1));
  4224. *start = NEXT_TX(entry);
  4225. }
  4226. }
  4227. /* Now clean up the sw ring entries. */
  4228. i = 0;
  4229. while (entry != last_plus_one) {
  4230. if (i == 0) {
  4231. tp->tx_buffers[entry].skb = new_skb;
  4232. } else {
  4233. tp->tx_buffers[entry].skb = NULL;
  4234. }
  4235. entry = NEXT_TX(entry);
  4236. i++;
  4237. }
  4238. skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
  4239. dev_kfree_skb(skb);
  4240. return ret;
  4241. }
  4242. static void tg3_set_txd(struct tg3 *tp, int entry,
  4243. dma_addr_t mapping, int len, u32 flags,
  4244. u32 mss_and_is_end)
  4245. {
  4246. struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
  4247. int is_end = (mss_and_is_end & 0x1);
  4248. u32 mss = (mss_and_is_end >> 1);
  4249. u32 vlan_tag = 0;
  4250. if (is_end)
  4251. flags |= TXD_FLAG_END;
  4252. if (flags & TXD_FLAG_VLAN) {
  4253. vlan_tag = flags >> 16;
  4254. flags &= 0xffff;
  4255. }
  4256. vlan_tag |= (mss << TXD_MSS_SHIFT);
  4257. txd->addr_hi = ((u64) mapping >> 32);
  4258. txd->addr_lo = ((u64) mapping & 0xffffffff);
  4259. txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
  4260. txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
  4261. }
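/* The mss_and_is_end argument packs two values: bit 0 marks the last
 * descriptor of the packet and the remaining bits carry the MSS, so
 * callers pass e.g. (i == last) | (mss << 1) when queueing fragments.
 */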
  4262. /* hard_start_xmit for devices that don't have any bugs and
  4263. * support TG3_FLG2_HW_TSO_2 only.
  4264. */
  4265. static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
  4266. {
  4267. struct tg3 *tp = netdev_priv(dev);
  4268. u32 len, entry, base_flags, mss;
  4269. struct skb_shared_info *sp;
  4270. dma_addr_t mapping;
  4271. len = skb_headlen(skb);
  4272. /* We are running in BH disabled context with netif_tx_lock
  4273. * and TX reclaim runs via tp->napi.poll inside of a software
  4274. * interrupt. Furthermore, IRQ processing runs lockless so we have
  4275. * no IRQ context deadlocks to worry about either. Rejoice!
  4276. */
  4277. if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
  4278. if (!netif_queue_stopped(dev)) {
  4279. netif_stop_queue(dev);
  4280. /* This is a hard error, log it. */
  4281. printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
  4282. "queue awake!\n", dev->name);
  4283. }
  4284. return NETDEV_TX_BUSY;
  4285. }
  4286. entry = tp->tx_prod;
  4287. base_flags = 0;
  4288. mss = 0;
  4289. if ((mss = skb_shinfo(skb)->gso_size) != 0) {
  4290. int tcp_opt_len, ip_tcp_len;
  4291. if (skb_header_cloned(skb) &&
  4292. pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
  4293. dev_kfree_skb(skb);
  4294. goto out_unlock;
  4295. }
  4296. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  4297. mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
  4298. else {
  4299. struct iphdr *iph = ip_hdr(skb);
  4300. tcp_opt_len = tcp_optlen(skb);
  4301. ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
  4302. iph->check = 0;
  4303. iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
  4304. mss |= (ip_tcp_len + tcp_opt_len) << 9;
  4305. }
  4306. base_flags |= (TXD_FLAG_CPU_PRE_DMA |
  4307. TXD_FLAG_CPU_POST_DMA);
  4308. tcp_hdr(skb)->check = 0;
  4309. }
  4310. else if (skb->ip_summed == CHECKSUM_PARTIAL)
  4311. base_flags |= TXD_FLAG_TCPUDP_CSUM;
  4312. #if TG3_VLAN_TAG_USED
  4313. if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
  4314. base_flags |= (TXD_FLAG_VLAN |
  4315. (vlan_tx_tag_get(skb) << 16));
  4316. #endif
  4317. if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
  4318. dev_kfree_skb(skb);
  4319. goto out_unlock;
  4320. }
  4321. sp = skb_shinfo(skb);
  4322. mapping = sp->dma_maps[0];
  4323. tp->tx_buffers[entry].skb = skb;
  4324. tg3_set_txd(tp, entry, mapping, len, base_flags,
  4325. (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
  4326. entry = NEXT_TX(entry);
  4327. /* Now loop through additional data fragments, and queue them. */
  4328. if (skb_shinfo(skb)->nr_frags > 0) {
  4329. unsigned int i, last;
  4330. last = skb_shinfo(skb)->nr_frags - 1;
  4331. for (i = 0; i <= last; i++) {
  4332. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  4333. len = frag->size;
  4334. mapping = sp->dma_maps[i + 1];
  4335. tp->tx_buffers[entry].skb = NULL;
  4336. tg3_set_txd(tp, entry, mapping, len,
  4337. base_flags, (i == last) | (mss << 1));
  4338. entry = NEXT_TX(entry);
  4339. }
  4340. }
  4341. /* Packets are ready, update Tx producer idx local and on card. */
  4342. tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
  4343. tp->tx_prod = entry;
  4344. if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
  4345. netif_stop_queue(dev);
  4346. if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
  4347. netif_wake_queue(tp->dev);
  4348. }
  4349. out_unlock:
  4350. mmiowb();
  4351. dev->trans_start = jiffies;
  4352. return NETDEV_TX_OK;
  4353. }
  4354. static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4355. /* Use GSO to work around a rare TSO bug that may be triggered when the
  4356. * TSO header is greater than 80 bytes.
  4357. */
  4358. static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
  4359. {
  4360. struct sk_buff *segs, *nskb;
  4361. /* Estimate the number of fragments in the worst case */
  4362. if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
  4363. netif_stop_queue(tp->dev);
  4364. if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
  4365. return NETDEV_TX_BUSY;
  4366. netif_wake_queue(tp->dev);
  4367. }
  4368. segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
  4369. if (IS_ERR(segs))
  4370. goto tg3_tso_bug_end;
  4371. do {
  4372. nskb = segs;
  4373. segs = segs->next;
  4374. nskb->next = NULL;
  4375. tg3_start_xmit_dma_bug(nskb, tp->dev);
  4376. } while (segs);
  4377. tg3_tso_bug_end:
  4378. dev_kfree_skb(skb);
  4379. return NETDEV_TX_OK;
  4380. }
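/* A worst case of three descriptors per resulting segment is assumed
 * when checking ring space above; if even that cannot be guaranteed,
 * the queue is stopped and the packet is pushed back with
 * NETDEV_TX_BUSY. Otherwise the skb is software-segmented and each
 * segment is resubmitted through tg3_start_xmit_dma_bug().
 */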
  4381. /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
  4382. * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
  4383. */
  4384. static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
  4385. {
  4386. struct tg3 *tp = netdev_priv(dev);
  4387. u32 len, entry, base_flags, mss;
  4388. struct skb_shared_info *sp;
  4389. int would_hit_hwbug;
  4390. dma_addr_t mapping;
  4391. len = skb_headlen(skb);
  4392. /* We are running in BH disabled context with netif_tx_lock
  4393. * and TX reclaim runs via tp->napi.poll inside of a software
  4394. * interrupt. Furthermore, IRQ processing runs lockless so we have
  4395. * no IRQ context deadlocks to worry about either. Rejoice!
  4396. */
  4397. if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
  4398. if (!netif_queue_stopped(dev)) {
  4399. netif_stop_queue(dev);
  4400. /* This is a hard error, log it. */
  4401. printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
  4402. "queue awake!\n", dev->name);
  4403. }
  4404. return NETDEV_TX_BUSY;
  4405. }
  4406. entry = tp->tx_prod;
  4407. base_flags = 0;
  4408. if (skb->ip_summed == CHECKSUM_PARTIAL)
  4409. base_flags |= TXD_FLAG_TCPUDP_CSUM;
  4410. mss = 0;
  4411. if ((mss = skb_shinfo(skb)->gso_size) != 0) {
  4412. struct iphdr *iph;
  4413. int tcp_opt_len, ip_tcp_len, hdr_len;
  4414. if (skb_header_cloned(skb) &&
  4415. pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
  4416. dev_kfree_skb(skb);
  4417. goto out_unlock;
  4418. }
  4419. tcp_opt_len = tcp_optlen(skb);
  4420. ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
  4421. hdr_len = ip_tcp_len + tcp_opt_len;
  4422. if (unlikely((ETH_HLEN + hdr_len) > 80) &&
  4423. (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
  4424. return (tg3_tso_bug(tp, skb));
  4425. base_flags |= (TXD_FLAG_CPU_PRE_DMA |
  4426. TXD_FLAG_CPU_POST_DMA);
  4427. iph = ip_hdr(skb);
  4428. iph->check = 0;
  4429. iph->tot_len = htons(mss + hdr_len);
  4430. if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
  4431. tcp_hdr(skb)->check = 0;
  4432. base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
  4433. } else
  4434. tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
  4435. iph->daddr, 0,
  4436. IPPROTO_TCP,
  4437. 0);
  4438. if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
  4439. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
  4440. if (tcp_opt_len || iph->ihl > 5) {
  4441. int tsflags;
  4442. tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
  4443. mss |= (tsflags << 11);
  4444. }
  4445. } else {
  4446. if (tcp_opt_len || iph->ihl > 5) {
  4447. int tsflags;
  4448. tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
  4449. base_flags |= tsflags << 12;
  4450. }
  4451. }
  4452. }
  4453. #if TG3_VLAN_TAG_USED
  4454. if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
  4455. base_flags |= (TXD_FLAG_VLAN |
  4456. (vlan_tx_tag_get(skb) << 16));
  4457. #endif
  4458. if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
  4459. dev_kfree_skb(skb);
  4460. goto out_unlock;
  4461. }
  4462. sp = skb_shinfo(skb);
  4463. mapping = sp->dma_maps[0];
  4464. tp->tx_buffers[entry].skb = skb;
  4465. would_hit_hwbug = 0;
  4466. if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
  4467. would_hit_hwbug = 1;
  4468. else if (tg3_4g_overflow_test(mapping, len))
  4469. would_hit_hwbug = 1;
  4470. tg3_set_txd(tp, entry, mapping, len, base_flags,
  4471. (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
  4472. entry = NEXT_TX(entry);
  4473. /* Now loop through additional data fragments, and queue them. */
  4474. if (skb_shinfo(skb)->nr_frags > 0) {
  4475. unsigned int i, last;
  4476. last = skb_shinfo(skb)->nr_frags - 1;
  4477. for (i = 0; i <= last; i++) {
  4478. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  4479. len = frag->size;
  4480. mapping = sp->dma_maps[i + 1];
  4481. tp->tx_buffers[entry].skb = NULL;
  4482. if (tg3_4g_overflow_test(mapping, len))
  4483. would_hit_hwbug = 1;
  4484. if (tg3_40bit_overflow_test(tp, mapping, len))
  4485. would_hit_hwbug = 1;
  4486. if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
  4487. tg3_set_txd(tp, entry, mapping, len,
  4488. base_flags, (i == last)|(mss << 1));
  4489. else
  4490. tg3_set_txd(tp, entry, mapping, len,
  4491. base_flags, (i == last));
  4492. entry = NEXT_TX(entry);
  4493. }
  4494. }
  4495. if (would_hit_hwbug) {
  4496. u32 last_plus_one = entry;
  4497. u32 start;
  4498. start = entry - 1 - skb_shinfo(skb)->nr_frags;
  4499. start &= (TG3_TX_RING_SIZE - 1);
  4500. /* If the workaround fails due to memory/mapping
  4501. * failure, silently drop this packet.
  4502. */
  4503. if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
  4504. &start, base_flags, mss))
  4505. goto out_unlock;
  4506. entry = start;
  4507. }
  4508. /* Packets are ready, update Tx producer idx local and on card. */
  4509. tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
  4510. tp->tx_prod = entry;
  4511. if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
  4512. netif_stop_queue(dev);
  4513. if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
  4514. netif_wake_queue(tp->dev);
  4515. }
  4516. out_unlock:
  4517. mmiowb();
  4518. dev->trans_start = jiffies;
  4519. return NETDEV_TX_OK;
  4520. }
  4521. static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
  4522. int new_mtu)
  4523. {
  4524. dev->mtu = new_mtu;
  4525. if (new_mtu > ETH_DATA_LEN) {
  4526. if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
  4527. tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
  4528. ethtool_op_set_tso(dev, 0);
  4529. }
  4530. else
  4531. tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
  4532. } else {
  4533. if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
  4534. tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
  4535. tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
  4536. }
  4537. }
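/* For MTU > 1500, 5780-class chips keep using the standard ring but
 * give up TSO capability, while other chips enable the separate jumbo
 * receive ring; the flags set here are consumed later by
 * tg3_init_rings() and the ring-sizing logic.
 */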
  4538. static int tg3_change_mtu(struct net_device *dev, int new_mtu)
  4539. {
  4540. struct tg3 *tp = netdev_priv(dev);
  4541. int err;
  4542. if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
  4543. return -EINVAL;
  4544. if (!netif_running(dev)) {
  4545. /* We'll just catch it later when the
  4546. * device is up'd.
  4547. */
  4548. tg3_set_mtu(dev, tp, new_mtu);
  4549. return 0;
  4550. }
  4551. tg3_phy_stop(tp);
  4552. tg3_netif_stop(tp);
  4553. tg3_full_lock(tp, 1);
  4554. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  4555. tg3_set_mtu(dev, tp, new_mtu);
  4556. err = tg3_restart_hw(tp, 0);
  4557. if (!err)
  4558. tg3_netif_start(tp);
  4559. tg3_full_unlock(tp);
  4560. if (!err)
  4561. tg3_phy_start(tp);
  4562. return err;
  4563. }
  4564. /* Free up pending packets in all rx/tx rings.
  4565. *
  4566. * The chip has been shut down and the driver detached from
4567. * the networking stack, so no interrupts or new tx packets will
  4568. * end up in the driver. tp->{tx,}lock is not held and we are not
  4569. * in an interrupt context and thus may sleep.
  4570. */
  4571. static void tg3_free_rings(struct tg3 *tp)
  4572. {
  4573. struct ring_info *rxp;
  4574. int i;
  4575. for (i = 0; i < TG3_RX_RING_SIZE; i++) {
  4576. rxp = &tp->rx_std_buffers[i];
  4577. if (rxp->skb == NULL)
  4578. continue;
  4579. pci_unmap_single(tp->pdev,
  4580. pci_unmap_addr(rxp, mapping),
  4581. tp->rx_pkt_buf_sz - tp->rx_offset,
  4582. PCI_DMA_FROMDEVICE);
  4583. dev_kfree_skb_any(rxp->skb);
  4584. rxp->skb = NULL;
  4585. }
  4586. for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
  4587. rxp = &tp->rx_jumbo_buffers[i];
  4588. if (rxp->skb == NULL)
  4589. continue;
  4590. pci_unmap_single(tp->pdev,
  4591. pci_unmap_addr(rxp, mapping),
  4592. RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
  4593. PCI_DMA_FROMDEVICE);
  4594. dev_kfree_skb_any(rxp->skb);
  4595. rxp->skb = NULL;
  4596. }
  4597. for (i = 0; i < TG3_TX_RING_SIZE; ) {
  4598. struct tx_ring_info *txp;
  4599. struct sk_buff *skb;
  4600. txp = &tp->tx_buffers[i];
  4601. skb = txp->skb;
  4602. if (skb == NULL) {
  4603. i++;
  4604. continue;
  4605. }
  4606. skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
  4607. txp->skb = NULL;
  4608. i += skb_shinfo(skb)->nr_frags + 1;
  4609. dev_kfree_skb_any(skb);
  4610. }
  4611. }
  4612. /* Initialize tx/rx rings for packet processing.
  4613. *
  4614. * The chip has been shut down and the driver detached from
  4615. * the networking, so no interrupts or new tx packets will
  4616. * end up in the driver. tp->{tx,}lock are held and thus
  4617. * we may not sleep.
  4618. */
  4619. static int tg3_init_rings(struct tg3 *tp)
  4620. {
  4621. u32 i;
  4622. /* Free up all the SKBs. */
  4623. tg3_free_rings(tp);
  4624. /* Zero out all descriptors. */
  4625. memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
  4626. memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
  4627. memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
  4628. memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
  4629. tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
  4630. if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
  4631. (tp->dev->mtu > ETH_DATA_LEN))
  4632. tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
	/* Initialize invariants of the rings; we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
  4637. for (i = 0; i < TG3_RX_RING_SIZE; i++) {
  4638. struct tg3_rx_buffer_desc *rxd;
  4639. rxd = &tp->rx_std[i];
  4640. rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
  4641. << RXD_LEN_SHIFT;
  4642. rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
  4643. rxd->opaque = (RXD_OPAQUE_RING_STD |
  4644. (i << RXD_OPAQUE_INDEX_SHIFT));
  4645. }
  4646. if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
  4647. for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
  4648. struct tg3_rx_buffer_desc *rxd;
  4649. rxd = &tp->rx_jumbo[i];
  4650. rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
  4651. << RXD_LEN_SHIFT;
  4652. rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
  4653. RXD_FLAG_JUMBO;
  4654. rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
  4655. (i << RXD_OPAQUE_INDEX_SHIFT));
  4656. }
  4657. }
  4658. /* Now allocate fresh SKBs for each rx ring. */
  4659. for (i = 0; i < tp->rx_pending; i++) {
  4660. if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
  4661. printk(KERN_WARNING PFX
  4662. "%s: Using a smaller RX standard ring, "
  4663. "only %d out of %d buffers were allocated "
  4664. "successfully.\n",
  4665. tp->dev->name, i, tp->rx_pending);
  4666. if (i == 0)
  4667. return -ENOMEM;
  4668. tp->rx_pending = i;
  4669. break;
  4670. }
  4671. }
  4672. if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
  4673. for (i = 0; i < tp->rx_jumbo_pending; i++) {
  4674. if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
  4675. -1, i) < 0) {
  4676. printk(KERN_WARNING PFX
  4677. "%s: Using a smaller RX jumbo ring, "
  4678. "only %d out of %d buffers were "
  4679. "allocated successfully.\n",
  4680. tp->dev->name, i, tp->rx_jumbo_pending);
  4681. if (i == 0) {
  4682. tg3_free_rings(tp);
  4683. return -ENOMEM;
  4684. }
  4685. tp->rx_jumbo_pending = i;
  4686. break;
  4687. }
  4688. }
  4689. }
  4690. return 0;
  4691. }
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
  4696. static void tg3_free_consistent(struct tg3 *tp)
  4697. {
  4698. kfree(tp->rx_std_buffers);
  4699. tp->rx_std_buffers = NULL;
  4700. if (tp->rx_std) {
  4701. pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
  4702. tp->rx_std, tp->rx_std_mapping);
  4703. tp->rx_std = NULL;
  4704. }
  4705. if (tp->rx_jumbo) {
  4706. pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
  4707. tp->rx_jumbo, tp->rx_jumbo_mapping);
  4708. tp->rx_jumbo = NULL;
  4709. }
  4710. if (tp->rx_rcb) {
  4711. pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
  4712. tp->rx_rcb, tp->rx_rcb_mapping);
  4713. tp->rx_rcb = NULL;
  4714. }
  4715. if (tp->tx_ring) {
  4716. pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
  4717. tp->tx_ring, tp->tx_desc_mapping);
  4718. tp->tx_ring = NULL;
  4719. }
  4720. if (tp->hw_status) {
  4721. pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
  4722. tp->hw_status, tp->status_mapping);
  4723. tp->hw_status = NULL;
  4724. }
  4725. if (tp->hw_stats) {
  4726. pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
  4727. tp->hw_stats, tp->stats_mapping);
  4728. tp->hw_stats = NULL;
  4729. }
  4730. }
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
  4735. static int tg3_alloc_consistent(struct tg3 *tp)
  4736. {
  4737. tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
  4738. (TG3_RX_RING_SIZE +
  4739. TG3_RX_JUMBO_RING_SIZE)) +
  4740. (sizeof(struct tx_ring_info) *
  4741. TG3_TX_RING_SIZE),
  4742. GFP_KERNEL);
  4743. if (!tp->rx_std_buffers)
  4744. return -ENOMEM;
  4745. tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
  4746. tp->tx_buffers = (struct tx_ring_info *)
  4747. &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
  4748. tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
  4749. &tp->rx_std_mapping);
  4750. if (!tp->rx_std)
  4751. goto err_out;
  4752. tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
  4753. &tp->rx_jumbo_mapping);
  4754. if (!tp->rx_jumbo)
  4755. goto err_out;
  4756. tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
  4757. &tp->rx_rcb_mapping);
  4758. if (!tp->rx_rcb)
  4759. goto err_out;
  4760. tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
  4761. &tp->tx_desc_mapping);
  4762. if (!tp->tx_ring)
  4763. goto err_out;
  4764. tp->hw_status = pci_alloc_consistent(tp->pdev,
  4765. TG3_HW_STATUS_SIZE,
  4766. &tp->status_mapping);
  4767. if (!tp->hw_status)
  4768. goto err_out;
  4769. tp->hw_stats = pci_alloc_consistent(tp->pdev,
  4770. sizeof(struct tg3_hw_stats),
  4771. &tp->stats_mapping);
  4772. if (!tp->hw_stats)
  4773. goto err_out;
  4774. memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
  4775. memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
  4776. return 0;
  4777. err_out:
  4778. tg3_free_consistent(tp);
  4779. return -ENOMEM;
  4780. }
  4781. #define MAX_WAIT_CNT 1000
  4782. /* To stop a block, clear the enable bit and poll till it
  4783. * clears. tp->lock is held.
  4784. */
  4785. static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
  4786. {
  4787. unsigned int i;
  4788. u32 val;
  4789. if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
  4790. switch (ofs) {
  4791. case RCVLSC_MODE:
  4792. case DMAC_MODE:
  4793. case MBFREE_MODE:
  4794. case BUFMGR_MODE:
  4795. case MEMARB_MODE:
  4796. /* We can't enable/disable these bits of the
  4797. * 5705/5750, just say success.
  4798. */
  4799. return 0;
  4800. default:
  4801. break;
  4802. }
  4803. }
  4804. val = tr32(ofs);
  4805. val &= ~enable_bit;
  4806. tw32_f(ofs, val);
  4807. for (i = 0; i < MAX_WAIT_CNT; i++) {
  4808. udelay(100);
  4809. val = tr32(ofs);
  4810. if ((val & enable_bit) == 0)
  4811. break;
  4812. }
  4813. if (i == MAX_WAIT_CNT && !silent) {
  4814. printk(KERN_ERR PFX "tg3_stop_block timed out, "
  4815. "ofs=%lx enable_bit=%x\n",
  4816. ofs, enable_bit);
  4817. return -ENODEV;
  4818. }
  4819. return 0;
  4820. }
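/* Quiesce the MAC and the send/receive/DMA state machines, then clear
 * the status and statistics blocks.  Per-block failures are ORed into
 * the return value.
 */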
  4821. /* tp->lock is held. */
  4822. static int tg3_abort_hw(struct tg3 *tp, int silent)
  4823. {
  4824. int i, err;
  4825. tg3_disable_ints(tp);
  4826. tp->rx_mode &= ~RX_MODE_ENABLE;
  4827. tw32_f(MAC_RX_MODE, tp->rx_mode);
  4828. udelay(10);
  4829. err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
  4830. err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
  4831. err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
  4832. err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
  4833. err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
  4834. err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
  4835. err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
  4836. err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
  4837. err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
  4838. err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
  4839. err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
  4840. err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
  4841. err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
  4842. tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
  4843. tw32_f(MAC_MODE, tp->mac_mode);
  4844. udelay(40);
  4845. tp->tx_mode &= ~TX_MODE_ENABLE;
  4846. tw32_f(MAC_TX_MODE, tp->tx_mode);
  4847. for (i = 0; i < MAX_WAIT_CNT; i++) {
  4848. udelay(100);
  4849. if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
  4850. break;
  4851. }
  4852. if (i >= MAX_WAIT_CNT) {
  4853. printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
  4854. "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
  4855. tp->dev->name, tr32(MAC_TX_MODE));
  4856. err |= -ENODEV;
  4857. }
  4858. err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
  4859. err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
  4860. err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
  4861. tw32(FTQ_RESET, 0xffffffff);
  4862. tw32(FTQ_RESET, 0x00000000);
  4863. err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
  4864. err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
  4865. if (tp->hw_status)
  4866. memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
  4867. if (tp->hw_stats)
  4868. memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
  4869. return err;
  4870. }
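/* Post an event to the APE management firmware.  This is a no-op unless
 * the APE segment signature and firmware-ready status check out; the
 * event word is written under TG3_APE_LOCK_MEM and the APE is then
 * kicked via the TG3_APE_EVENT register.
 */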
  4871. static void tg3_ape_send_event(struct tg3 *tp, u32 event)
  4872. {
  4873. int i;
  4874. u32 apedata;
  4875. apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
  4876. if (apedata != APE_SEG_SIG_MAGIC)
  4877. return;
  4878. apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
  4879. if (!(apedata & APE_FW_STATUS_READY))
  4880. return;
  4881. /* Wait for up to 1 millisecond for APE to service previous event. */
  4882. for (i = 0; i < 10; i++) {
  4883. if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
  4884. return;
  4885. apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
  4886. if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
  4887. tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
  4888. event | APE_EVENT_STATUS_EVENT_PENDING);
  4889. tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
  4890. if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
  4891. break;
  4892. udelay(100);
  4893. }
  4894. if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
  4895. tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
  4896. }
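/* Notify the APE firmware of a driver state transition (init, shutdown
 * or suspend) so it can track whether the host OS is present.
 */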
  4897. static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
  4898. {
  4899. u32 event;
  4900. u32 apedata;
  4901. if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
  4902. return;
  4903. switch (kind) {
  4904. case RESET_KIND_INIT:
  4905. tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
  4906. APE_HOST_SEG_SIG_MAGIC);
  4907. tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
  4908. APE_HOST_SEG_LEN_MAGIC);
  4909. apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
  4910. tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
  4911. tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
  4912. APE_HOST_DRIVER_ID_MAGIC);
  4913. tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
  4914. APE_HOST_BEHAV_NO_PHYLOCK);
  4915. event = APE_EVENT_STATUS_STATE_START;
  4916. break;
  4917. case RESET_KIND_SHUTDOWN:
  4918. /* With the interface we are currently using,
  4919. * APE does not track driver state. Wiping
  4920. * out the HOST SEGMENT SIGNATURE forces
  4921. * the APE to assume OS absent status.
  4922. */
  4923. tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
  4924. event = APE_EVENT_STATUS_STATE_UNLOAD;
  4925. break;
  4926. case RESET_KIND_SUSPEND:
  4927. event = APE_EVENT_STATUS_STATE_SUSPEND;
  4928. break;
  4929. default:
  4930. return;
  4931. }
  4932. event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
  4933. tg3_ape_send_event(tp, event);
  4934. }
  4935. /* tp->lock is held. */
  4936. static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
  4937. {
  4938. tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
  4939. NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
  4940. if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
  4941. switch (kind) {
  4942. case RESET_KIND_INIT:
  4943. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  4944. DRV_STATE_START);
  4945. break;
  4946. case RESET_KIND_SHUTDOWN:
  4947. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  4948. DRV_STATE_UNLOAD);
  4949. break;
  4950. case RESET_KIND_SUSPEND:
  4951. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  4952. DRV_STATE_SUSPEND);
  4953. break;
  4954. default:
  4955. break;
  4956. }
  4957. }
  4958. if (kind == RESET_KIND_INIT ||
  4959. kind == RESET_KIND_SUSPEND)
  4960. tg3_ape_driver_state_change(tp, kind);
  4961. }
  4962. /* tp->lock is held. */
  4963. static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
  4964. {
  4965. if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
  4966. switch (kind) {
  4967. case RESET_KIND_INIT:
  4968. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  4969. DRV_STATE_START_DONE);
  4970. break;
  4971. case RESET_KIND_SHUTDOWN:
  4972. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  4973. DRV_STATE_UNLOAD_DONE);
  4974. break;
  4975. default:
  4976. break;
  4977. }
  4978. }
  4979. if (kind == RESET_KIND_SHUTDOWN)
  4980. tg3_ape_driver_state_change(tp, kind);
  4981. }
  4982. /* tp->lock is held. */
  4983. static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
  4984. {
  4985. if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
  4986. switch (kind) {
  4987. case RESET_KIND_INIT:
  4988. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  4989. DRV_STATE_START);
  4990. break;
  4991. case RESET_KIND_SHUTDOWN:
  4992. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  4993. DRV_STATE_UNLOAD);
  4994. break;
  4995. case RESET_KIND_SUSPEND:
  4996. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  4997. DRV_STATE_SUSPEND);
  4998. break;
  4999. default:
  5000. break;
  5001. }
  5002. }
  5003. }
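/* Wait for the bootcode to finish initializing after a reset.  On the
 * 5906 this polls VCPU_STATUS; other chips poll the firmware mailbox.
 * A missing firmware image is reported once but is not an error.
 */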
  5004. static int tg3_poll_fw(struct tg3 *tp)
  5005. {
  5006. int i;
  5007. u32 val;
  5008. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  5009. /* Wait up to 20ms for init done. */
  5010. for (i = 0; i < 200; i++) {
  5011. if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
  5012. return 0;
  5013. udelay(100);
  5014. }
  5015. return -ENODEV;
  5016. }
  5017. /* Wait for firmware initialization to complete. */
  5018. for (i = 0; i < 100000; i++) {
  5019. tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
  5020. if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
  5021. break;
  5022. udelay(10);
  5023. }
  5024. /* Chip might not be fitted with firmware. Some Sun onboard
  5025. * parts are configured like that. So don't signal the timeout
  5026. * of the above loop as an error, but do report the lack of
  5027. * running firmware once.
  5028. */
  5029. if (i >= 100000 &&
  5030. !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
  5031. tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
  5032. printk(KERN_INFO PFX "%s: No firmware running.\n",
  5033. tp->dev->name);
  5034. }
  5035. return 0;
  5036. }
  5037. /* Save PCI command register before chip reset */
  5038. static void tg3_save_pci_state(struct tg3 *tp)
  5039. {
  5040. pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
  5041. }
  5042. /* Restore PCI state after chip reset */
  5043. static void tg3_restore_pci_state(struct tg3 *tp)
  5044. {
  5045. u32 val;
  5046. /* Re-enable indirect register accesses. */
  5047. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  5048. tp->misc_host_ctrl);
  5049. /* Set MAX PCI retry to zero. */
  5050. val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
  5051. if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
  5052. (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
  5053. val |= PCISTATE_RETRY_SAME_DMA;
  5054. /* Allow reads and writes to the APE register and memory space. */
  5055. if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
  5056. val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
  5057. PCISTATE_ALLOW_APE_SHMEM_WR;
  5058. pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
  5059. pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
  5060. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
  5061. if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
  5062. pcie_set_readrq(tp->pdev, 4096);
  5063. else {
  5064. pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
  5065. tp->pci_cacheline_sz);
  5066. pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  5067. tp->pci_lat_timer);
  5068. }
  5069. }
  5070. /* Make sure PCI-X relaxed ordering bit is clear. */
  5071. if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
  5072. u16 pcix_cmd;
  5073. pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
  5074. &pcix_cmd);
  5075. pcix_cmd &= ~PCI_X_CMD_ERO;
  5076. pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
  5077. pcix_cmd);
  5078. }
  5079. if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* Chip reset on 5780 will reset the MSI enable bit,
		 * so we need to restore it.
		 */
  5083. if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
  5084. u16 ctrl;
  5085. pci_read_config_word(tp->pdev,
  5086. tp->msi_cap + PCI_MSI_FLAGS,
  5087. &ctrl);
  5088. pci_write_config_word(tp->pdev,
  5089. tp->msi_cap + PCI_MSI_FLAGS,
  5090. ctrl | PCI_MSI_FLAGS_ENABLE);
  5091. val = tr32(MSGINT_MODE);
  5092. tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
  5093. }
  5094. }
  5095. }
  5096. static void tg3_stop_fw(struct tg3 *);
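/* Perform a GRC core-clock reset of the chip and then restore the PCI
 * state, MAC mode and ASF handshake state that the reset clobbers.
 */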
  5097. /* tp->lock is held. */
  5098. static int tg3_chip_reset(struct tg3 *tp)
  5099. {
  5100. u32 val;
  5101. void (*write_op)(struct tg3 *, u32, u32);
  5102. int err;
  5103. tg3_nvram_lock(tp);
  5104. tg3_mdio_stop(tp);
  5105. tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
  5106. /* No matching tg3_nvram_unlock() after this because
  5107. * chip reset below will undo the nvram lock.
  5108. */
  5109. tp->nvram_lock_cnt = 0;
  5110. /* GRC_MISC_CFG core clock reset will clear the memory
  5111. * enable bit in PCI register 4 and the MSI enable bit
  5112. * on some chips, so we save relevant registers here.
  5113. */
  5114. tg3_save_pci_state(tp);
  5115. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  5116. (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
  5117. tw32(GRC_FASTBOOT_PC, 0);
  5118. /*
  5119. * We must avoid the readl() that normally takes place.
  5120. * It locks machines, causes machine checks, and other
  5121. * fun things. So, temporarily disable the 5701
  5122. * hardware workaround, while we do the reset.
  5123. */
  5124. write_op = tp->write32;
  5125. if (write_op == tg3_write_flush_reg32)
  5126. tp->write32 = tg3_write32;
	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
  5133. tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
  5134. if (tp->hw_status) {
  5135. tp->hw_status->status = 0;
  5136. tp->hw_status->status_tag = 0;
  5137. }
  5138. tp->last_tag = 0;
  5139. smp_mb();
  5140. synchronize_irq(tp->pdev->irq);
  5141. /* do the reset */
  5142. val = GRC_MISC_CFG_CORECLK_RESET;
  5143. if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
  5144. if (tr32(0x7e2c) == 0x60) {
  5145. tw32(0x7e2c, 0x20);
  5146. }
  5147. if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
  5148. tw32(GRC_MISC_CFG, (1 << 29));
  5149. val |= (1 << 29);
  5150. }
  5151. }
  5152. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  5153. tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
  5154. tw32(GRC_VCPU_EXT_CTRL,
  5155. tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
  5156. }
  5157. if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
  5158. val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
  5159. tw32(GRC_MISC_CFG, val);
  5160. /* restore 5701 hardware bug workaround write method */
  5161. tp->write32 = write_op;
	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below has worked.
	 * Ho hum...
	 */
  5175. udelay(120);
	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
  5182. pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
  5183. udelay(120);
  5184. if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
  5185. if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
  5186. int i;
  5187. u32 cfg_val;
  5188. /* Wait for link training to complete. */
  5189. for (i = 0; i < 5000; i++)
  5190. udelay(100);
  5191. pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
  5192. pci_write_config_dword(tp->pdev, 0xc4,
  5193. cfg_val | (1 << 15));
  5194. }
  5195. /* Set PCIE max payload size to 128 bytes and
  5196. * clear the "no snoop" and "relaxed ordering" bits.
  5197. */
  5198. pci_write_config_word(tp->pdev,
  5199. tp->pcie_cap + PCI_EXP_DEVCTL,
  5200. 0);
  5201. pcie_set_readrq(tp->pdev, 4096);
  5202. /* Clear error status */
  5203. pci_write_config_word(tp->pdev,
  5204. tp->pcie_cap + PCI_EXP_DEVSTA,
  5205. PCI_EXP_DEVSTA_CED |
  5206. PCI_EXP_DEVSTA_NFED |
  5207. PCI_EXP_DEVSTA_FED |
  5208. PCI_EXP_DEVSTA_URD);
  5209. }
  5210. tg3_restore_pci_state(tp);
  5211. tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
  5212. val = 0;
  5213. if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
  5214. val = tr32(MEMARB_MODE);
  5215. tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
  5216. if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
  5217. tg3_stop_fw(tp);
  5218. tw32(0x5000, 0x400);
  5219. }
  5220. tw32(GRC_MODE, tp->grc_mode);
  5221. if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
  5222. val = tr32(0xc4);
  5223. tw32(0xc4, val | (1 << 15));
  5224. }
  5225. if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
  5226. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
  5227. tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
  5228. if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
  5229. tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
  5230. tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
  5231. }
  5232. if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
  5233. tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
  5234. tw32_f(MAC_MODE, tp->mac_mode);
  5235. } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
  5236. tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
  5237. tw32_f(MAC_MODE, tp->mac_mode);
  5238. } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
  5239. tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
  5240. if (tp->mac_mode & MAC_MODE_APE_TX_EN)
  5241. tp->mac_mode |= MAC_MODE_TDE_ENABLE;
  5242. tw32_f(MAC_MODE, tp->mac_mode);
  5243. } else
  5244. tw32_f(MAC_MODE, 0);
  5245. udelay(40);
  5246. tg3_mdio_start(tp);
  5247. tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
  5248. err = tg3_poll_fw(tp);
  5249. if (err)
  5250. return err;
  5251. if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
  5252. tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
  5253. val = tr32(0x7c00);
  5254. tw32(0x7c00, val | (1 << 25));
  5255. }
  5256. /* Reprobe ASF enable state. */
  5257. tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
  5258. tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
  5259. tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
  5260. if (val == NIC_SRAM_DATA_SIG_MAGIC) {
  5261. u32 nic_cfg;
  5262. tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
  5263. if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
  5264. tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
  5265. tp->last_event_jiffies = jiffies;
  5266. if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
  5267. tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
  5268. }
  5269. }
  5270. return 0;
  5271. }
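/* Pause the ASF firmware by sending FWCMD_NICDRV_PAUSE_FW and waiting
 * for the RX CPU to acknowledge the event.
 */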
  5272. /* tp->lock is held. */
  5273. static void tg3_stop_fw(struct tg3 *tp)
  5274. {
  5275. if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
  5276. !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
  5277. /* Wait for RX cpu to ACK the previous event. */
  5278. tg3_wait_for_event_ack(tp);
  5279. tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
  5280. tg3_generate_fw_event(tp);
  5281. /* Wait for RX cpu to ACK this event. */
  5282. tg3_wait_for_event_ack(tp);
  5283. }
  5284. }
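/* Bring the chip to a halted state: stop the firmware, write the
 * pre-reset signature, abort pending DMA, reset the chip and write the
 * post-reset signatures.
 */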
  5285. /* tp->lock is held. */
  5286. static int tg3_halt(struct tg3 *tp, int kind, int silent)
  5287. {
  5288. int err;
  5289. tg3_stop_fw(tp);
  5290. tg3_write_sig_pre_reset(tp, kind);
  5291. tg3_abort_hw(tp, silent);
  5292. err = tg3_chip_reset(tp);
  5293. tg3_write_sig_legacy(tp, kind);
  5294. tg3_write_sig_post_reset(tp, kind);
  5295. if (err)
  5296. return err;
  5297. return 0;
  5298. }
  5299. #define RX_CPU_SCRATCH_BASE 0x30000
  5300. #define RX_CPU_SCRATCH_SIZE 0x04000
  5301. #define TX_CPU_SCRATCH_BASE 0x34000
  5302. #define TX_CPU_SCRATCH_SIZE 0x04000
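/* Halt the on-chip RX or TX CPU.  On the 5906 the VCPU is halted via
 * GRC_VCPU_EXT_CTRL rather than the CPU_MODE register.
 */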
  5303. /* tp->lock is held. */
  5304. static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
  5305. {
  5306. int i;
  5307. BUG_ON(offset == TX_CPU_BASE &&
  5308. (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
  5309. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  5310. u32 val = tr32(GRC_VCPU_EXT_CTRL);
  5311. tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
  5312. return 0;
  5313. }
  5314. if (offset == RX_CPU_BASE) {
  5315. for (i = 0; i < 10000; i++) {
  5316. tw32(offset + CPU_STATE, 0xffffffff);
  5317. tw32(offset + CPU_MODE, CPU_MODE_HALT);
  5318. if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
  5319. break;
  5320. }
  5321. tw32(offset + CPU_STATE, 0xffffffff);
  5322. tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
  5323. udelay(10);
  5324. } else {
  5325. for (i = 0; i < 10000; i++) {
  5326. tw32(offset + CPU_STATE, 0xffffffff);
  5327. tw32(offset + CPU_MODE, CPU_MODE_HALT);
  5328. if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
  5329. break;
  5330. }
  5331. }
  5332. if (i >= 10000) {
  5333. printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
  5334. "and %s CPU\n",
  5335. tp->dev->name,
  5336. (offset == RX_CPU_BASE ? "RX" : "TX"));
  5337. return -ENODEV;
  5338. }
  5339. /* Clear firmware's nvram arbitration. */
  5340. if (tp->tg3_flags & TG3_FLAG_NVRAM)
  5341. tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
  5342. return 0;
  5343. }
  5344. struct fw_info {
  5345. unsigned int fw_base;
  5346. unsigned int fw_len;
  5347. const __be32 *fw_data;
  5348. };
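/* Copy a firmware image into the scratch memory of the given on-chip
 * CPU after halting it.  The CPU is left halted; the caller starts it.
 */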
  5349. /* tp->lock is held. */
  5350. static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
  5351. int cpu_scratch_size, struct fw_info *info)
  5352. {
  5353. int err, lock_err, i;
  5354. void (*write_op)(struct tg3 *, u32, u32);
  5355. if (cpu_base == TX_CPU_BASE &&
  5356. (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  5357. printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
  5358. "TX cpu firmware on %s which is 5705.\n",
  5359. tp->dev->name);
  5360. return -EINVAL;
  5361. }
  5362. if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
  5363. write_op = tg3_write_mem;
  5364. else
  5365. write_op = tg3_write_indirect_reg32;
  5366. /* It is possible that bootcode is still loading at this point.
  5367. * Get the nvram lock first before halting the cpu.
  5368. */
  5369. lock_err = tg3_nvram_lock(tp);
  5370. err = tg3_halt_cpu(tp, cpu_base);
  5371. if (!lock_err)
  5372. tg3_nvram_unlock(tp);
  5373. if (err)
  5374. goto out;
  5375. for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
  5376. write_op(tp, cpu_scratch_base + i, 0);
  5377. tw32(cpu_base + CPU_STATE, 0xffffffff);
  5378. tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
  5379. for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
  5380. write_op(tp, (cpu_scratch_base +
  5381. (info->fw_base & 0xffff) +
  5382. (i * sizeof(u32))),
  5383. be32_to_cpu(info->fw_data[i]));
  5384. err = 0;
  5385. out:
  5386. return err;
  5387. }
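/* Load the 5701 A0 workaround firmware into both on-chip CPUs and then
 * start only the RX CPU at the image's entry point.
 */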
  5388. /* tp->lock is held. */
  5389. static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
  5390. {
  5391. struct fw_info info;
  5392. const __be32 *fw_data;
  5393. int err, i;
  5394. fw_data = (void *)tp->fw->data;
  5395. /* Firmware blob starts with version numbers, followed by
  5396. start address and length. We are setting complete length.
  5397. length = end_address_of_bss - start_address_of_text.
  5398. Remainder is the blob to be loaded contiguously
  5399. from start address. */
  5400. info.fw_base = be32_to_cpu(fw_data[1]);
  5401. info.fw_len = tp->fw->size - 12;
  5402. info.fw_data = &fw_data[3];
  5403. err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
  5404. RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
  5405. &info);
  5406. if (err)
  5407. return err;
  5408. err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
  5409. TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
  5410. &info);
  5411. if (err)
  5412. return err;
  5413. /* Now startup only the RX cpu. */
  5414. tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
  5415. tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
  5416. for (i = 0; i < 5; i++) {
  5417. if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
  5418. break;
  5419. tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
  5420. tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
  5421. tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
  5422. udelay(1000);
  5423. }
  5424. if (i >= 5) {
  5425. printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
  5426. "to set RX CPU PC, is %08x should be %08x\n",
  5427. tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
  5428. info.fw_base);
  5429. return -ENODEV;
  5430. }
  5431. tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
  5432. tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
  5433. return 0;
  5434. }
  5435. /* 5705 needs a special version of the TSO firmware. */
  5436. /* tp->lock is held. */
  5437. static int tg3_load_tso_firmware(struct tg3 *tp)
  5438. {
  5439. struct fw_info info;
  5440. const __be32 *fw_data;
  5441. unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
  5442. int err, i;
  5443. if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
  5444. return 0;
  5445. fw_data = (void *)tp->fw->data;
  5446. /* Firmware blob starts with version numbers, followed by
  5447. start address and length. We are setting complete length.
  5448. length = end_address_of_bss - start_address_of_text.
  5449. Remainder is the blob to be loaded contiguously
  5450. from start address. */
  5451. info.fw_base = be32_to_cpu(fw_data[1]);
  5452. cpu_scratch_size = tp->fw_len;
  5453. info.fw_len = tp->fw->size - 12;
  5454. info.fw_data = &fw_data[3];
  5455. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
  5456. cpu_base = RX_CPU_BASE;
  5457. cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
  5458. } else {
  5459. cpu_base = TX_CPU_BASE;
  5460. cpu_scratch_base = TX_CPU_SCRATCH_BASE;
  5461. cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
  5462. }
  5463. err = tg3_load_firmware_cpu(tp, cpu_base,
  5464. cpu_scratch_base, cpu_scratch_size,
  5465. &info);
  5466. if (err)
  5467. return err;
  5468. /* Now startup the cpu. */
  5469. tw32(cpu_base + CPU_STATE, 0xffffffff);
  5470. tw32_f(cpu_base + CPU_PC, info.fw_base);
  5471. for (i = 0; i < 5; i++) {
  5472. if (tr32(cpu_base + CPU_PC) == info.fw_base)
  5473. break;
  5474. tw32(cpu_base + CPU_STATE, 0xffffffff);
  5475. tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
  5476. tw32_f(cpu_base + CPU_PC, info.fw_base);
  5477. udelay(1000);
  5478. }
  5479. if (i >= 5) {
  5480. printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
  5481. "to set CPU PC, is %08x should be %08x\n",
  5482. tp->dev->name, tr32(cpu_base + CPU_PC),
  5483. info.fw_base);
  5484. return -ENODEV;
  5485. }
  5486. tw32(cpu_base + CPU_STATE, 0xffffffff);
  5487. tw32_f(cpu_base + CPU_MODE, 0x00000000);
  5488. return 0;
  5489. }
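/* Set a new hardware MAC address.  When ASF appears to be using MAC
 * address 1, that register pair is left untouched.
 */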
  5490. static int tg3_set_mac_addr(struct net_device *dev, void *p)
  5491. {
  5492. struct tg3 *tp = netdev_priv(dev);
  5493. struct sockaddr *addr = p;
  5494. int err = 0, skip_mac_1 = 0;
  5495. if (!is_valid_ether_addr(addr->sa_data))
  5496. return -EINVAL;
  5497. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  5498. if (!netif_running(dev))
  5499. return 0;
  5500. if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
  5501. u32 addr0_high, addr0_low, addr1_high, addr1_low;
  5502. addr0_high = tr32(MAC_ADDR_0_HIGH);
  5503. addr0_low = tr32(MAC_ADDR_0_LOW);
  5504. addr1_high = tr32(MAC_ADDR_1_HIGH);
  5505. addr1_low = tr32(MAC_ADDR_1_LOW);
  5506. /* Skip MAC addr 1 if ASF is using it. */
  5507. if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
  5508. !(addr1_high == 0 && addr1_low == 0))
  5509. skip_mac_1 = 1;
  5510. }
  5511. spin_lock_bh(&tp->lock);
  5512. __tg3_set_mac_addr(tp, skip_mac_1);
  5513. spin_unlock_bh(&tp->lock);
  5514. return err;
  5515. }
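/* Program one TG3_BDINFO block in NIC SRAM: the host DMA address, the
 * length/flags word and, on pre-5705 chips, the NIC-local ring address.
 */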
  5516. /* tp->lock is held. */
  5517. static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
  5518. dma_addr_t mapping, u32 maxlen_flags,
  5519. u32 nic_addr)
  5520. {
  5521. tg3_write_mem(tp,
  5522. (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
  5523. ((u64) mapping >> 32));
  5524. tg3_write_mem(tp,
  5525. (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
  5526. ((u64) mapping & 0xffffffff));
  5527. tg3_write_mem(tp,
  5528. (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
  5529. maxlen_flags);
  5530. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
  5531. tg3_write_mem(tp,
  5532. (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
  5533. nic_addr);
  5534. }
  5535. static void __tg3_set_rx_mode(struct net_device *);
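/* Apply the ethtool coalescing parameters to the host coalescing engine.
 * On non-5705_PLUS chips the stats block tick value is forced to zero
 * while the link is down.
 */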
  5536. static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
  5537. {
  5538. tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
  5539. tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
  5540. tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
  5541. tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
  5542. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  5543. tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
  5544. tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
  5545. }
  5546. tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
  5547. tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
  5548. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  5549. u32 val = ec->stats_block_coalesce_usecs;
  5550. if (!netif_carrier_ok(tp->dev))
  5551. val = 0;
  5552. tw32(HOSTCC_STAT_COAL_TICKS, val);
  5553. }
  5554. }
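/* Full hardware bring-up after reset: re-initialize the rings, buffer
 * manager, DMA engines, MAC, host coalescing and receive rules, then
 * set up the PHY.
 */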
  5555. /* tp->lock is held. */
  5556. static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
  5557. {
  5558. u32 val, rdmac_mode;
  5559. int i, err, limit;
  5560. tg3_disable_ints(tp);
  5561. tg3_stop_fw(tp);
  5562. tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
  5563. if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
  5564. tg3_abort_hw(tp, 1);
  5565. }
  5566. if (reset_phy &&
  5567. !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
  5568. tg3_phy_reset(tp);
  5569. err = tg3_chip_reset(tp);
  5570. if (err)
  5571. return err;
  5572. tg3_write_sig_legacy(tp, RESET_KIND_INIT);
  5573. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
  5574. val = tr32(TG3_CPMU_CTRL);
  5575. val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
  5576. tw32(TG3_CPMU_CTRL, val);
  5577. val = tr32(TG3_CPMU_LSPD_10MB_CLK);
  5578. val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
  5579. val |= CPMU_LSPD_10MB_MACCLK_6_25;
  5580. tw32(TG3_CPMU_LSPD_10MB_CLK, val);
  5581. val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
  5582. val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
  5583. val |= CPMU_LNK_AWARE_MACCLK_6_25;
  5584. tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
  5585. val = tr32(TG3_CPMU_HST_ACC);
  5586. val &= ~CPMU_HST_ACC_MACCLK_MASK;
  5587. val |= CPMU_HST_ACC_MACCLK_6_25;
  5588. tw32(TG3_CPMU_HST_ACC, val);
  5589. }
  5590. /* This works around an issue with Athlon chipsets on
  5591. * B3 tigon3 silicon. This bit has no effect on any
  5592. * other revision. But do not set this on PCI Express
  5593. * chips and don't even touch the clocks if the CPMU is present.
  5594. */
  5595. if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
  5596. if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
  5597. tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
  5598. tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
  5599. }
  5600. if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
  5601. (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
  5602. val = tr32(TG3PCI_PCISTATE);
  5603. val |= PCISTATE_RETRY_SAME_DMA;
  5604. tw32(TG3PCI_PCISTATE, val);
  5605. }
  5606. if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
  5607. /* Allow reads and writes to the
  5608. * APE register and memory space.
  5609. */
  5610. val = tr32(TG3PCI_PCISTATE);
  5611. val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
  5612. PCISTATE_ALLOW_APE_SHMEM_WR;
  5613. tw32(TG3PCI_PCISTATE, val);
  5614. }
  5615. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
  5616. /* Enable some hw fixes. */
  5617. val = tr32(TG3PCI_MSI_DATA);
  5618. val |= (1 << 26) | (1 << 28) | (1 << 29);
  5619. tw32(TG3PCI_MSI_DATA, val);
  5620. }
  5621. /* Descriptor ring init may make accesses to the
  5622. * NIC SRAM area to setup the TX descriptors, so we
  5623. * can only do this after the hardware has been
  5624. * successfully reset.
  5625. */
  5626. err = tg3_init_rings(tp);
  5627. if (err)
  5628. return err;
  5629. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
  5630. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
  5631. /* This value is determined during the probe time DMA
  5632. * engine test, tg3_test_dma.
  5633. */
  5634. tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
  5635. }
  5636. tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
  5637. GRC_MODE_4X_NIC_SEND_RINGS |
  5638. GRC_MODE_NO_TX_PHDR_CSUM |
  5639. GRC_MODE_NO_RX_PHDR_CSUM);
  5640. tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
  5647. tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
  5648. tw32(GRC_MODE,
  5649. tp->grc_mode |
  5650. (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Setup the timer prescaler register.  Clock is always 66 MHz. */
  5652. val = tr32(GRC_MISC_CFG);
  5653. val &= ~0xff;
  5654. val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
  5655. tw32(GRC_MISC_CFG, val);
  5656. /* Initialize MBUF/DESC pool. */
  5657. if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
  5658. /* Do nothing. */
  5659. } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
  5660. tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
  5661. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
  5662. tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
  5663. else
  5664. tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
  5665. tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
  5666. tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
  5667. }
  5668. else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
  5669. int fw_len;
  5670. fw_len = tp->fw_len;
  5671. fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
  5672. tw32(BUFMGR_MB_POOL_ADDR,
  5673. NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
  5674. tw32(BUFMGR_MB_POOL_SIZE,
  5675. NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
  5676. }
  5677. if (tp->dev->mtu <= ETH_DATA_LEN) {
  5678. tw32(BUFMGR_MB_RDMA_LOW_WATER,
  5679. tp->bufmgr_config.mbuf_read_dma_low_water);
  5680. tw32(BUFMGR_MB_MACRX_LOW_WATER,
  5681. tp->bufmgr_config.mbuf_mac_rx_low_water);
  5682. tw32(BUFMGR_MB_HIGH_WATER,
  5683. tp->bufmgr_config.mbuf_high_water);
  5684. } else {
  5685. tw32(BUFMGR_MB_RDMA_LOW_WATER,
  5686. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
  5687. tw32(BUFMGR_MB_MACRX_LOW_WATER,
  5688. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
  5689. tw32(BUFMGR_MB_HIGH_WATER,
  5690. tp->bufmgr_config.mbuf_high_water_jumbo);
  5691. }
  5692. tw32(BUFMGR_DMA_LOW_WATER,
  5693. tp->bufmgr_config.dma_low_water);
  5694. tw32(BUFMGR_DMA_HIGH_WATER,
  5695. tp->bufmgr_config.dma_high_water);
  5696. tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
  5697. for (i = 0; i < 2000; i++) {
  5698. if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
  5699. break;
  5700. udelay(10);
  5701. }
  5702. if (i >= 2000) {
  5703. printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
  5704. tp->dev->name);
  5705. return -ENODEV;
  5706. }
  5707. /* Setup replenish threshold. */
  5708. val = tp->rx_pending / 8;
  5709. if (val == 0)
  5710. val = 1;
  5711. else if (val > tp->rx_std_max_post)
  5712. val = tp->rx_std_max_post;
  5713. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  5714. if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
  5715. tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
  5716. if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
  5717. val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
  5718. }
  5719. tw32(RCVBDI_STD_THRESH, val);
  5720. /* Initialize TG3_BDINFO's at:
  5721. * RCVDBDI_STD_BD: standard eth size rx ring
  5722. * RCVDBDI_JUMBO_BD: jumbo frame rx ring
  5723. * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
  5724. *
  5725. * like so:
  5726. * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
  5727. * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
  5728. * ring attribute flags
  5729. * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
  5730. *
  5731. * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
  5732. * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
  5733. *
  5734. * The size of each ring is fixed in the firmware, but the location is
  5735. * configurable.
  5736. */
  5737. tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
  5738. ((u64) tp->rx_std_mapping >> 32));
  5739. tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
  5740. ((u64) tp->rx_std_mapping & 0xffffffff));
  5741. tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
  5742. NIC_SRAM_RX_BUFFER_DESC);
  5743. /* Don't even try to program the JUMBO/MINI buffer descriptor
  5744. * configs on 5705.
  5745. */
  5746. if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
  5747. tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
  5748. RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
  5749. } else {
  5750. tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
  5751. RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
  5752. tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
  5753. BDINFO_FLAGS_DISABLED);
  5754. /* Setup replenish threshold. */
  5755. tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
  5756. if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
  5757. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
  5758. ((u64) tp->rx_jumbo_mapping >> 32));
  5759. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
  5760. ((u64) tp->rx_jumbo_mapping & 0xffffffff));
  5761. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
  5762. RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
  5763. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
  5764. NIC_SRAM_RX_JUMBO_BUFFER_DESC);
  5765. } else {
  5766. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
  5767. BDINFO_FLAGS_DISABLED);
  5768. }
  5769. }
  5770. /* There is only one send ring on 5705/5750, no need to explicitly
  5771. * disable the others.
  5772. */
  5773. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  5774. /* Clear out send RCB ring in SRAM. */
  5775. for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
  5776. tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
  5777. BDINFO_FLAGS_DISABLED);
  5778. }
  5779. tp->tx_prod = 0;
  5780. tp->tx_cons = 0;
  5781. tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
  5782. tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
  5783. tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
  5784. tp->tx_desc_mapping,
  5785. (TG3_TX_RING_SIZE <<
  5786. BDINFO_FLAGS_MAXLEN_SHIFT),
  5787. NIC_SRAM_TX_BUFFER_DESC);
  5788. /* There is only one receive return ring on 5705/5750, no need
  5789. * to explicitly disable the others.
  5790. */
  5791. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  5792. for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
  5793. i += TG3_BDINFO_SIZE) {
  5794. tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
  5795. BDINFO_FLAGS_DISABLED);
  5796. }
  5797. }
  5798. tp->rx_rcb_ptr = 0;
  5799. tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
  5800. tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
  5801. tp->rx_rcb_mapping,
  5802. (TG3_RX_RCB_RING_SIZE(tp) <<
  5803. BDINFO_FLAGS_MAXLEN_SHIFT),
  5804. 0);
  5805. tp->rx_std_ptr = tp->rx_pending;
  5806. tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
  5807. tp->rx_std_ptr);
  5808. tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
  5809. tp->rx_jumbo_pending : 0;
  5810. tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
  5811. tp->rx_jumbo_ptr);
  5812. /* Initialize MAC address and backoff seed. */
  5813. __tg3_set_mac_addr(tp, 0);
  5814. /* MTU + ethernet header + FCS + optional VLAN tag */
  5815. tw32(MAC_RX_MTU_SIZE,
  5816. tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
  5817. /* The slot time is changed by tg3_setup_phy if we
  5818. * run at gigabit with half duplex.
  5819. */
  5820. tw32(MAC_TX_LENGTHS,
  5821. (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  5822. (6 << TX_LENGTHS_IPG_SHIFT) |
  5823. (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
  5824. /* Receive rules. */
  5825. tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
  5826. tw32(RCVLPC_CONFIG, 0x0181);
  5827. /* Calculate RDMAC_MODE setting early, we need it to determine
  5828. * the RCVLPC_STATE_ENABLE mask.
  5829. */
  5830. rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
  5831. RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
  5832. RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
  5833. RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
  5834. RDMAC_MODE_LNGREAD_ENAB);
  5835. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  5836. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  5837. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  5838. rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
  5839. RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
  5840. RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
  5841. /* If statement applies to 5705 and 5750 PCI devices only */
  5842. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  5843. tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
  5844. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
  5845. if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
  5846. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
  5847. rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
  5848. } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
  5849. !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
  5850. rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
  5851. }
  5852. }
  5853. if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
  5854. rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
  5855. if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
  5856. rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
  5857. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  5858. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  5859. rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
  5860. /* Receive/send statistics. */
  5861. if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
  5862. val = tr32(RCVLPC_STATS_ENABLE);
  5863. val &= ~RCVLPC_STATSENAB_DACK_FIX;
  5864. tw32(RCVLPC_STATS_ENABLE, val);
  5865. } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
  5866. (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
  5867. val = tr32(RCVLPC_STATS_ENABLE);
  5868. val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
  5869. tw32(RCVLPC_STATS_ENABLE, val);
  5870. } else {
  5871. tw32(RCVLPC_STATS_ENABLE, 0xffffff);
  5872. }
  5873. tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
  5874. tw32(SNDDATAI_STATSENAB, 0xffffff);
  5875. tw32(SNDDATAI_STATSCTRL,
  5876. (SNDDATAI_SCTRL_ENABLE |
  5877. SNDDATAI_SCTRL_FASTUPD));
  5878. /* Setup host coalescing engine. */
  5879. tw32(HOSTCC_MODE, 0);
  5880. for (i = 0; i < 2000; i++) {
  5881. if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
  5882. break;
  5883. udelay(10);
  5884. }
  5885. __tg3_set_coalesce(tp, &tp->coal);
  5886. /* set status block DMA address */
  5887. tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
  5888. ((u64) tp->status_mapping >> 32));
  5889. tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
  5890. ((u64) tp->status_mapping & 0xffffffff));
  5891. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  5892. /* Status/statistics block address. See tg3_timer,
  5893. * the tg3_periodic_fetch_stats call there, and
  5894. * tg3_get_stats to see how this works for 5705/5750 chips.
  5895. */
  5896. tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
  5897. ((u64) tp->stats_mapping >> 32));
  5898. tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
  5899. ((u64) tp->stats_mapping & 0xffffffff));
  5900. tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
  5901. tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
  5902. }
  5903. tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
  5904. tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
  5905. tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
  5906. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
  5907. tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
  5908. /* Clear statistics/status block in chip, and status block in ram. */
  5909. for (i = NIC_SRAM_STATS_BLK;
  5910. i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
  5911. i += sizeof(u32)) {
  5912. tg3_write_mem(tp, i, 0);
  5913. udelay(40);
  5914. }
  5915. memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
  5916. if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
  5917. tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
  5918. /* reset to prevent losing 1st rx packet intermittently */
  5919. tw32_f(MAC_RX_MODE, RX_MODE_RESET);
  5920. udelay(10);
  5921. }
  5922. if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
  5923. tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
  5924. else
  5925. tp->mac_mode = 0;
  5926. tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
  5927. MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
  5928. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
  5929. !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
  5930. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
  5931. tp->mac_mode |= MAC_MODE_LINK_POLARITY;
  5932. tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
  5933. udelay(40);
  5934. /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
  5935. * If TG3_FLG2_IS_NIC is zero, we should read the
  5936. * register to preserve the GPIO settings for LOMs. The GPIOs,
  5937. * whether used as inputs or outputs, are set by boot code after
  5938. * reset.
  5939. */
  5940. if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
  5941. u32 gpio_mask;
  5942. gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
  5943. GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
  5944. GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
  5945. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
  5946. gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
  5947. GRC_LCLCTRL_GPIO_OUTPUT3;
  5948. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
  5949. gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
  5950. tp->grc_local_ctrl &= ~gpio_mask;
  5951. tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
  5952. /* GPIO1 must be driven high for eeprom write protect */
  5953. if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
  5954. tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
  5955. GRC_LCLCTRL_GPIO_OUTPUT1);
  5956. }
  5957. tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
  5958. udelay(100);
  5959. tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
  5960. tp->last_tag = 0;
  5961. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
  5962. tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
  5963. udelay(40);
  5964. }
  5965. val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
  5966. WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
  5967. WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
  5968. WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
  5969. WDMAC_MODE_LNGREAD_ENAB);
  5970. /* If statement applies to 5705 and 5750 PCI devices only */
  5971. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  5972. tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
  5973. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
  5977. /* nothing */
  5978. } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
  5979. !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
  5980. !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
  5981. val |= WDMAC_MODE_RX_ACCEL;
  5982. }
  5983. }
  5984. /* Enable host coalescing bug fix */
  5985. if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
  5986. val |= WDMAC_MODE_STATUS_TAG_FIX;
  5987. tw32_f(WDMAC_MODE, val);
  5988. udelay(40);
  5989. if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
  5990. u16 pcix_cmd;
  5991. pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
  5992. &pcix_cmd);
  5993. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
  5994. pcix_cmd &= ~PCI_X_CMD_MAX_READ;
  5995. pcix_cmd |= PCI_X_CMD_READ_2K;
  5996. } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
  5997. pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
  5998. pcix_cmd |= PCI_X_CMD_READ_2K;
  5999. }
  6000. pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
  6001. pcix_cmd);
  6002. }
  6003. tw32_f(RDMAC_MODE, rdmac_mode);
  6004. udelay(40);
  6005. tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
  6006. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
  6007. tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
  6008. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
  6009. tw32(SNDDATAC_MODE,
  6010. SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
  6011. else
  6012. tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
  6013. tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
  6014. tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
  6015. tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
  6016. tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
  6017. if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
  6018. tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
  6019. tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
  6020. tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
  6021. if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
  6022. err = tg3_load_5701_a0_firmware_fix(tp);
  6023. if (err)
  6024. return err;
  6025. }
  6026. if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
  6027. err = tg3_load_tso_firmware(tp);
  6028. if (err)
  6029. return err;
  6030. }
  6031. tp->tx_mode = TX_MODE_ENABLE;
  6032. tw32_f(MAC_TX_MODE, tp->tx_mode);
  6033. udelay(100);
  6034. tp->rx_mode = RX_MODE_ENABLE;
  6035. if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
  6036. tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
  6037. tw32_f(MAC_RX_MODE, tp->rx_mode);
  6038. udelay(10);
  6039. tw32(MAC_LED_CTRL, tp->led_ctrl);
  6040. tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
  6041. if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
  6042. tw32_f(MAC_RX_MODE, RX_MODE_RESET);
  6043. udelay(10);
  6044. }
  6045. tw32_f(MAC_RX_MODE, tp->rx_mode);
  6046. udelay(10);
  6047. if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
  6048. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
  6049. !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
  6050. /* Set drive transmission level to 1.2V */
  6051. /* only if the signal pre-emphasis bit is not set */
  6052. val = tr32(MAC_SERDES_CFG);
  6053. val &= 0xfffff000;
  6054. val |= 0x880;
  6055. tw32(MAC_SERDES_CFG, val);
  6056. }
  6057. if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
  6058. tw32(MAC_SERDES_CFG, 0x616000);
  6059. }
  6060. /* Prevent chip from dropping frames when flow control
  6061. * is enabled.
  6062. */
  6063. tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
  6064. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
  6065. (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
  6066. /* Use hardware link auto-negotiation */
  6067. tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
  6068. }
  6069. if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
  6070. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
  6071. u32 tmp;
  6072. tmp = tr32(SERDES_RX_CTRL);
  6073. tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
  6074. tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
  6075. tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
  6076. tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
  6077. }
  6078. if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
  6079. if (tp->link_config.phy_is_low_power) {
  6080. tp->link_config.phy_is_low_power = 0;
  6081. tp->link_config.speed = tp->link_config.orig_speed;
  6082. tp->link_config.duplex = tp->link_config.orig_duplex;
  6083. tp->link_config.autoneg = tp->link_config.orig_autoneg;
  6084. }
  6085. err = tg3_setup_phy(tp, 0);
  6086. if (err)
  6087. return err;
  6088. if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
  6089. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
  6090. u32 tmp;
  6091. /* Clear CRC stats. */
  6092. if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
  6093. tg3_writephy(tp, MII_TG3_TEST1,
  6094. tmp | MII_TG3_TEST1_CRC_EN);
  6095. tg3_readphy(tp, 0x14, &tmp);
  6096. }
  6097. }
  6098. }
  6099. __tg3_set_rx_mode(tp->dev);
  6100. /* Initialize receive rules. */
  6101. tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
  6102. tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
  6103. tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
  6104. tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
  6105. if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
  6106. !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
  6107. limit = 8;
  6108. else
  6109. limit = 16;
  6110. if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
  6111. limit -= 4;
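/*
 * Note on the switch below (a hedged reading of the code, not new behaviour):
 * it relies on deliberate case fall-through.  Entering at "case <limit>"
 * clears every receive rule/value pair from limit - 1 down to rule 4; rules
 * 3 and 2 are intentionally left commented out, and rules 0 and 1 were
 * programmed just above.
 */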
  6112. switch (limit) {
  6113. case 16:
  6114. tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
  6115. case 15:
  6116. tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
  6117. case 14:
  6118. tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
  6119. case 13:
  6120. tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
  6121. case 12:
  6122. tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
  6123. case 11:
  6124. tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
  6125. case 10:
  6126. tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
  6127. case 9:
  6128. tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
  6129. case 8:
  6130. tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
  6131. case 7:
  6132. tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
  6133. case 6:
  6134. tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
  6135. case 5:
  6136. tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
  6137. case 4:
  6138. /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
  6139. case 3:
  6140. /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
  6141. case 2:
  6142. case 1:
  6143. default:
  6144. break;
  6145. }
  6146. if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
  6147. /* Write our heartbeat update interval to APE. */
  6148. tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
  6149. APE_HOST_HEARTBEAT_INT_DISABLE);
  6150. tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
  6151. return 0;
  6152. }
  6153. /* Called at device open time to get the chip ready for
  6154. * packet processing. Invoked with tp->lock held.
  6155. */
  6156. static int tg3_init_hw(struct tg3 *tp, int reset_phy)
  6157. {
  6158. tg3_switch_clocks(tp);
  6159. tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  6160. return tg3_reset_hw(tp, reset_phy);
  6161. }
  6162. #define TG3_STAT_ADD32(PSTAT, REG) \
  6163. do { u32 __val = tr32(REG); \
  6164. (PSTAT)->low += __val; \
  6165. if ((PSTAT)->low < __val) \
  6166. (PSTAT)->high += 1; \
  6167. } while (0)
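/*
 * A short note on TG3_STAT_ADD32: it folds a 32-bit hardware counter read
 * into a 64-bit software accumulator.  After "low += __val", an unsigned
 * wrap can only have occurred when the new low word is smaller than the
 * value just added, so "if ((PSTAT)->low < __val) (PSTAT)->high += 1" is the
 * carry into the high word.  Equivalent 64-bit view (illustrative names):
 *
 *   u64 acc = ((u64)pstat->high << 32) | pstat->low;
 *   acc += tr32(REG);
 *   pstat->low  = (u32)acc;
 *   pstat->high = (u32)(acc >> 32);
 */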
  6168. static void tg3_periodic_fetch_stats(struct tg3 *tp)
  6169. {
  6170. struct tg3_hw_stats *sp = tp->hw_stats;
  6171. if (!netif_carrier_ok(tp->dev))
  6172. return;
  6173. TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
  6174. TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
  6175. TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
  6176. TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
  6177. TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
  6178. TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
  6179. TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
  6180. TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
  6181. TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
  6182. TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
  6183. TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
  6184. TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
  6185. TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
  6186. TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
  6187. TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
  6188. TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
  6189. TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
  6190. TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
  6191. TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
  6192. TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
  6193. TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
  6194. TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
  6195. TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
  6196. TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
  6197. TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
  6198. TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
  6199. TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
  6200. TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
  6201. TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
  6202. TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
  6203. }
  6204. static void tg3_timer(unsigned long __opaque)
  6205. {
  6206. struct tg3 *tp = (struct tg3 *) __opaque;
  6207. if (tp->irq_sync)
  6208. goto restart_timer;
  6209. spin_lock(&tp->lock);
  6210. if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6211. /* All of this garbage is because, when using non-tagged
6212. * IRQ status, the mailbox/status_block protocol the chip
6213. * uses with the CPU is race prone.
  6214. */
  6215. if (tp->hw_status->status & SD_STATUS_UPDATED) {
  6216. tw32(GRC_LOCAL_CTRL,
  6217. tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
  6218. } else {
  6219. tw32(HOSTCC_MODE, tp->coalesce_mode |
  6220. (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
  6221. }
  6222. if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
  6223. tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
  6224. spin_unlock(&tp->lock);
  6225. schedule_work(&tp->reset_task);
  6226. return;
  6227. }
  6228. }
  6229. /* This part only runs once per second. */
  6230. if (!--tp->timer_counter) {
  6231. if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
  6232. tg3_periodic_fetch_stats(tp);
  6233. if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
  6234. u32 mac_stat;
  6235. int phy_event;
  6236. mac_stat = tr32(MAC_STATUS);
  6237. phy_event = 0;
  6238. if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
  6239. if (mac_stat & MAC_STATUS_MI_INTERRUPT)
  6240. phy_event = 1;
  6241. } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
  6242. phy_event = 1;
  6243. if (phy_event)
  6244. tg3_setup_phy(tp, 0);
  6245. } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
  6246. u32 mac_stat = tr32(MAC_STATUS);
  6247. int need_setup = 0;
  6248. if (netif_carrier_ok(tp->dev) &&
  6249. (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
  6250. need_setup = 1;
  6251. }
6252. if (!netif_carrier_ok(tp->dev) &&
  6253. (mac_stat & (MAC_STATUS_PCS_SYNCED |
  6254. MAC_STATUS_SIGNAL_DET))) {
  6255. need_setup = 1;
  6256. }
  6257. if (need_setup) {
  6258. if (!tp->serdes_counter) {
  6259. tw32_f(MAC_MODE,
  6260. (tp->mac_mode &
  6261. ~MAC_MODE_PORT_MODE_MASK));
  6262. udelay(40);
  6263. tw32_f(MAC_MODE, tp->mac_mode);
  6264. udelay(40);
  6265. }
  6266. tg3_setup_phy(tp, 0);
  6267. }
  6268. } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
  6269. tg3_serdes_parallel_detect(tp);
  6270. tp->timer_counter = tp->timer_multiplier;
  6271. }
  6272. /* Heartbeat is only sent once every 2 seconds.
  6273. *
  6274. * The heartbeat is to tell the ASF firmware that the host
  6275. * driver is still alive. In the event that the OS crashes,
  6276. * ASF needs to reset the hardware to free up the FIFO space
  6277. * that may be filled with rx packets destined for the host.
  6278. * If the FIFO is full, ASF will no longer function properly.
  6279. *
  6280. * Unintended resets have been reported on real time kernels
  6281. * where the timer doesn't run on time. Netpoll will also have
6282. * the same problem.
  6283. *
  6284. * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
  6285. * to check the ring condition when the heartbeat is expiring
  6286. * before doing the reset. This will prevent most unintended
  6287. * resets.
  6288. */
  6289. if (!--tp->asf_counter) {
  6290. if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
  6291. !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
  6292. tg3_wait_for_event_ack(tp);
  6293. tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
  6294. FWCMD_NICDRV_ALIVE3);
  6295. tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
  6296. /* 5 seconds timeout */
  6297. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
  6298. tg3_generate_fw_event(tp);
  6299. }
  6300. tp->asf_counter = tp->asf_multiplier;
  6301. }
  6302. spin_unlock(&tp->lock);
  6303. restart_timer:
  6304. tp->timer.expires = jiffies + tp->timer_offset;
  6305. add_timer(&tp->timer);
  6306. }
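/*
 * Timer cadence, as set up later in tg3_open(): the timer expires every
 * timer_offset jiffies (HZ with TAGGED_STATUS, HZ/10 otherwise) and
 * timer_multiplier is HZ / timer_offset, so the "once per second" block
 * above runs at roughly 1 Hz either way.  asf_multiplier is twice that,
 * giving the ~2 second ASF heartbeat described in the comment above.
 */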
  6307. static int tg3_request_irq(struct tg3 *tp)
  6308. {
  6309. irq_handler_t fn;
  6310. unsigned long flags;
  6311. struct net_device *dev = tp->dev;
  6312. if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
  6313. fn = tg3_msi;
  6314. if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
  6315. fn = tg3_msi_1shot;
  6316. flags = IRQF_SAMPLE_RANDOM;
  6317. } else {
  6318. fn = tg3_interrupt;
  6319. if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
  6320. fn = tg3_interrupt_tagged;
  6321. flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
  6322. }
  6323. return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
  6324. }
  6325. static int tg3_test_interrupt(struct tg3 *tp)
  6326. {
  6327. struct net_device *dev = tp->dev;
  6328. int err, i, intr_ok = 0;
  6329. if (!netif_running(dev))
  6330. return -ENODEV;
  6331. tg3_disable_ints(tp);
  6332. free_irq(tp->pdev->irq, dev);
  6333. err = request_irq(tp->pdev->irq, tg3_test_isr,
  6334. IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
  6335. if (err)
  6336. return err;
  6337. tp->hw_status->status &= ~SD_STATUS_UPDATED;
  6338. tg3_enable_ints(tp);
  6339. tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
  6340. HOSTCC_MODE_NOW);
  6341. for (i = 0; i < 5; i++) {
  6342. u32 int_mbox, misc_host_ctrl;
  6343. int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
  6344. TG3_64BIT_REG_LOW);
  6345. misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
  6346. if ((int_mbox != 0) ||
  6347. (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
  6348. intr_ok = 1;
  6349. break;
  6350. }
  6351. msleep(10);
  6352. }
  6353. tg3_disable_ints(tp);
  6354. free_irq(tp->pdev->irq, dev);
  6355. err = tg3_request_irq(tp);
  6356. if (err)
  6357. return err;
  6358. if (intr_ok)
  6359. return 0;
  6360. return -EIO;
  6361. }
6362. /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6363. * INTx mode is successfully restored.
  6364. */
  6365. static int tg3_test_msi(struct tg3 *tp)
  6366. {
  6367. struct net_device *dev = tp->dev;
  6368. int err;
  6369. u16 pci_cmd;
  6370. if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
  6371. return 0;
  6372. /* Turn off SERR reporting in case MSI terminates with Master
  6373. * Abort.
  6374. */
  6375. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  6376. pci_write_config_word(tp->pdev, PCI_COMMAND,
  6377. pci_cmd & ~PCI_COMMAND_SERR);
  6378. err = tg3_test_interrupt(tp);
  6379. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  6380. if (!err)
  6381. return 0;
  6382. /* other failures */
  6383. if (err != -EIO)
  6384. return err;
  6385. /* MSI test failed, go back to INTx mode */
  6386. printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
  6387. "switching to INTx mode. Please report this failure to "
  6388. "the PCI maintainer and include system chipset information.\n",
  6389. tp->dev->name);
  6390. free_irq(tp->pdev->irq, dev);
  6391. pci_disable_msi(tp->pdev);
  6392. tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
  6393. err = tg3_request_irq(tp);
  6394. if (err)
  6395. return err;
  6396. /* Need to reset the chip because the MSI cycle may have terminated
  6397. * with Master Abort.
  6398. */
  6399. tg3_full_lock(tp, 1);
  6400. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  6401. err = tg3_init_hw(tp, 1);
  6402. tg3_full_unlock(tp);
  6403. if (err)
  6404. free_irq(tp->pdev->irq, dev);
  6405. return err;
  6406. }
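/*
 * Sketch of the MSI fallback sequence implemented above (a hedged reading of
 * the code): tg3_test_interrupt() is run with SERR reporting masked; on -EIO
 * the driver warns, frees the MSI vector, disables MSI, re-requests a legacy
 * INTx interrupt via tg3_request_irq(), and then halts and re-initialises
 * the chip in case the failed MSI cycle ended in a Master Abort.
 */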
  6407. static int tg3_request_firmware(struct tg3 *tp)
  6408. {
  6409. const __be32 *fw_data;
  6410. if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
  6411. printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
  6412. tp->dev->name, tp->fw_needed);
  6413. return -ENOENT;
  6414. }
  6415. fw_data = (void *)tp->fw->data;
  6416. /* Firmware blob starts with version numbers, followed by
  6417. * start address and _full_ length including BSS sections
6418. * (which must be longer than the actual data, of course).
  6419. */
  6420. tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
  6421. if (tp->fw_len < (tp->fw->size - 12)) {
  6422. printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
  6423. tp->dev->name, tp->fw_len, tp->fw_needed);
  6424. release_firmware(tp->fw);
  6425. tp->fw = NULL;
  6426. return -EINVAL;
  6427. }
  6428. /* We no longer need firmware; we have it. */
  6429. tp->fw_needed = NULL;
  6430. return 0;
  6431. }
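/*
 * Firmware blob layout assumed by the code above (big-endian 32-bit words):
 * word 0 appears to hold version information, word 1 the load/start address
 * and word 2 the full image length including BSS; the "size - 12" check is
 * simply those three header words.  Illustrative view (hypothetical names):
 *
 *   fw_version = be32_to_cpu(fw_data[0]);   // informational
 *   fw_base    = be32_to_cpu(fw_data[1]);   // load address
 *   fw_len     = be32_to_cpu(fw_data[2]);   // includes BSS
 */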
  6432. static int tg3_open(struct net_device *dev)
  6433. {
  6434. struct tg3 *tp = netdev_priv(dev);
  6435. int err;
  6436. if (tp->fw_needed) {
  6437. err = tg3_request_firmware(tp);
  6438. if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
  6439. if (err)
  6440. return err;
  6441. } else if (err) {
  6442. printk(KERN_WARNING "%s: TSO capability disabled.\n",
  6443. tp->dev->name);
  6444. tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
  6445. } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
  6446. printk(KERN_NOTICE "%s: TSO capability restored.\n",
  6447. tp->dev->name);
  6448. tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
  6449. }
  6450. }
  6451. netif_carrier_off(tp->dev);
  6452. err = tg3_set_power_state(tp, PCI_D0);
  6453. if (err)
  6454. return err;
  6455. tg3_full_lock(tp, 0);
  6456. tg3_disable_ints(tp);
  6457. tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
  6458. tg3_full_unlock(tp);
  6459. /* The placement of this call is tied
  6460. * to the setup and use of Host TX descriptors.
  6461. */
  6462. err = tg3_alloc_consistent(tp);
  6463. if (err)
  6464. return err;
  6465. if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
  6466. /* All MSI supporting chips should support tagged
  6467. * status. Assert that this is the case.
  6468. */
  6469. if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
  6470. printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
  6471. "Not using MSI.\n", tp->dev->name);
  6472. } else if (pci_enable_msi(tp->pdev) == 0) {
  6473. u32 msi_mode;
  6474. msi_mode = tr32(MSGINT_MODE);
  6475. tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
  6476. tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
  6477. }
  6478. }
  6479. err = tg3_request_irq(tp);
  6480. if (err) {
  6481. if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
  6482. pci_disable_msi(tp->pdev);
  6483. tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
  6484. }
  6485. tg3_free_consistent(tp);
  6486. return err;
  6487. }
  6488. napi_enable(&tp->napi);
  6489. tg3_full_lock(tp, 0);
  6490. err = tg3_init_hw(tp, 1);
  6491. if (err) {
  6492. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  6493. tg3_free_rings(tp);
  6494. } else {
  6495. if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
  6496. tp->timer_offset = HZ;
  6497. else
  6498. tp->timer_offset = HZ / 10;
  6499. BUG_ON(tp->timer_offset > HZ);
  6500. tp->timer_counter = tp->timer_multiplier =
  6501. (HZ / tp->timer_offset);
  6502. tp->asf_counter = tp->asf_multiplier =
  6503. ((HZ / tp->timer_offset) * 2);
  6504. init_timer(&tp->timer);
  6505. tp->timer.expires = jiffies + tp->timer_offset;
  6506. tp->timer.data = (unsigned long) tp;
  6507. tp->timer.function = tg3_timer;
  6508. }
  6509. tg3_full_unlock(tp);
  6510. if (err) {
  6511. napi_disable(&tp->napi);
  6512. free_irq(tp->pdev->irq, dev);
  6513. if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
  6514. pci_disable_msi(tp->pdev);
  6515. tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
  6516. }
  6517. tg3_free_consistent(tp);
  6518. return err;
  6519. }
  6520. if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
  6521. err = tg3_test_msi(tp);
  6522. if (err) {
  6523. tg3_full_lock(tp, 0);
  6524. if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
  6525. pci_disable_msi(tp->pdev);
  6526. tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
  6527. }
  6528. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  6529. tg3_free_rings(tp);
  6530. tg3_free_consistent(tp);
  6531. tg3_full_unlock(tp);
  6532. napi_disable(&tp->napi);
  6533. return err;
  6534. }
  6535. if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
  6536. if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
  6537. u32 val = tr32(PCIE_TRANSACTION_CFG);
  6538. tw32(PCIE_TRANSACTION_CFG,
  6539. val | PCIE_TRANS_CFG_1SHOT_MSI);
  6540. }
  6541. }
  6542. }
  6543. tg3_phy_start(tp);
  6544. tg3_full_lock(tp, 0);
  6545. add_timer(&tp->timer);
  6546. tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
  6547. tg3_enable_ints(tp);
  6548. tg3_full_unlock(tp);
  6549. netif_start_queue(dev);
  6550. return 0;
  6551. }
  6552. #if 0
  6553. /*static*/ void tg3_dump_state(struct tg3 *tp)
  6554. {
  6555. u32 val32, val32_2, val32_3, val32_4, val32_5;
  6556. u16 val16;
  6557. int i;
  6558. pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
  6559. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
  6560. printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
  6561. val16, val32);
  6562. /* MAC block */
  6563. printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
  6564. tr32(MAC_MODE), tr32(MAC_STATUS));
  6565. printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
  6566. tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
  6567. printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
  6568. tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
  6569. printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
  6570. tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
  6571. /* Send data initiator control block */
  6572. printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
  6573. tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
  6574. printk(" SNDDATAI_STATSCTRL[%08x]\n",
  6575. tr32(SNDDATAI_STATSCTRL));
  6576. /* Send data completion control block */
  6577. printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
  6578. /* Send BD ring selector block */
  6579. printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
  6580. tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
  6581. /* Send BD initiator control block */
  6582. printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
  6583. tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
  6584. /* Send BD completion control block */
  6585. printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
  6586. /* Receive list placement control block */
  6587. printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
  6588. tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
  6589. printk(" RCVLPC_STATSCTRL[%08x]\n",
  6590. tr32(RCVLPC_STATSCTRL));
  6591. /* Receive data and receive BD initiator control block */
  6592. printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
  6593. tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
  6594. /* Receive data completion control block */
  6595. printk("DEBUG: RCVDCC_MODE[%08x]\n",
  6596. tr32(RCVDCC_MODE));
  6597. /* Receive BD initiator control block */
  6598. printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
  6599. tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
  6600. /* Receive BD completion control block */
  6601. printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
  6602. tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
  6603. /* Receive list selector control block */
  6604. printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
  6605. tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
  6606. /* Mbuf cluster free block */
  6607. printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
  6608. tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
  6609. /* Host coalescing control block */
  6610. printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
  6611. tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
  6612. printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
  6613. tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
  6614. tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
  6615. printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
  6616. tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
  6617. tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
  6618. printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
  6619. tr32(HOSTCC_STATS_BLK_NIC_ADDR));
  6620. printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
  6621. tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
  6622. /* Memory arbiter control block */
  6623. printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
  6624. tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
  6625. /* Buffer manager control block */
  6626. printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
  6627. tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
  6628. printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
  6629. tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
  6630. printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
  6631. "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
  6632. tr32(BUFMGR_DMA_DESC_POOL_ADDR),
  6633. tr32(BUFMGR_DMA_DESC_POOL_SIZE));
  6634. /* Read DMA control block */
  6635. printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
  6636. tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
  6637. /* Write DMA control block */
  6638. printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
  6639. tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
  6640. /* DMA completion block */
  6641. printk("DEBUG: DMAC_MODE[%08x]\n",
  6642. tr32(DMAC_MODE));
  6643. /* GRC block */
  6644. printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
  6645. tr32(GRC_MODE), tr32(GRC_MISC_CFG));
  6646. printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
  6647. tr32(GRC_LOCAL_CTRL));
  6648. /* TG3_BDINFOs */
  6649. printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
  6650. tr32(RCVDBDI_JUMBO_BD + 0x0),
  6651. tr32(RCVDBDI_JUMBO_BD + 0x4),
  6652. tr32(RCVDBDI_JUMBO_BD + 0x8),
  6653. tr32(RCVDBDI_JUMBO_BD + 0xc));
  6654. printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
  6655. tr32(RCVDBDI_STD_BD + 0x0),
  6656. tr32(RCVDBDI_STD_BD + 0x4),
  6657. tr32(RCVDBDI_STD_BD + 0x8),
  6658. tr32(RCVDBDI_STD_BD + 0xc));
  6659. printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
  6660. tr32(RCVDBDI_MINI_BD + 0x0),
  6661. tr32(RCVDBDI_MINI_BD + 0x4),
  6662. tr32(RCVDBDI_MINI_BD + 0x8),
  6663. tr32(RCVDBDI_MINI_BD + 0xc));
  6664. tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
  6665. tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
  6666. tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
  6667. tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
  6668. printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
  6669. val32, val32_2, val32_3, val32_4);
  6670. tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
  6671. tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
  6672. tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
  6673. tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
  6674. printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
  6675. val32, val32_2, val32_3, val32_4);
  6676. tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
  6677. tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
  6678. tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
  6679. tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
  6680. tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
  6681. printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
  6682. val32, val32_2, val32_3, val32_4, val32_5);
  6683. /* SW status block */
  6684. printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
  6685. tp->hw_status->status,
  6686. tp->hw_status->status_tag,
  6687. tp->hw_status->rx_jumbo_consumer,
  6688. tp->hw_status->rx_consumer,
  6689. tp->hw_status->rx_mini_consumer,
  6690. tp->hw_status->idx[0].rx_producer,
  6691. tp->hw_status->idx[0].tx_consumer);
  6692. /* SW statistics block */
  6693. printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
  6694. ((u32 *)tp->hw_stats)[0],
  6695. ((u32 *)tp->hw_stats)[1],
  6696. ((u32 *)tp->hw_stats)[2],
  6697. ((u32 *)tp->hw_stats)[3]);
  6698. /* Mailboxes */
  6699. printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
  6700. tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
  6701. tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
  6702. tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
  6703. tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
  6704. /* NIC side send descriptors. */
  6705. for (i = 0; i < 6; i++) {
  6706. unsigned long txd;
  6707. txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
  6708. + (i * sizeof(struct tg3_tx_buffer_desc));
  6709. printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
  6710. i,
  6711. readl(txd + 0x0), readl(txd + 0x4),
  6712. readl(txd + 0x8), readl(txd + 0xc));
  6713. }
  6714. /* NIC side RX descriptors. */
  6715. for (i = 0; i < 6; i++) {
  6716. unsigned long rxd;
  6717. rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
  6718. + (i * sizeof(struct tg3_rx_buffer_desc));
  6719. printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
  6720. i,
  6721. readl(rxd + 0x0), readl(rxd + 0x4),
  6722. readl(rxd + 0x8), readl(rxd + 0xc));
  6723. rxd += (4 * sizeof(u32));
  6724. printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
  6725. i,
  6726. readl(rxd + 0x0), readl(rxd + 0x4),
  6727. readl(rxd + 0x8), readl(rxd + 0xc));
  6728. }
  6729. for (i = 0; i < 6; i++) {
  6730. unsigned long rxd;
  6731. rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
  6732. + (i * sizeof(struct tg3_rx_buffer_desc));
  6733. printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
  6734. i,
  6735. readl(rxd + 0x0), readl(rxd + 0x4),
  6736. readl(rxd + 0x8), readl(rxd + 0xc));
  6737. rxd += (4 * sizeof(u32));
  6738. printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
  6739. i,
  6740. readl(rxd + 0x0), readl(rxd + 0x4),
  6741. readl(rxd + 0x8), readl(rxd + 0xc));
  6742. }
  6743. }
  6744. #endif
  6745. static struct net_device_stats *tg3_get_stats(struct net_device *);
  6746. static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
  6747. static int tg3_close(struct net_device *dev)
  6748. {
  6749. struct tg3 *tp = netdev_priv(dev);
  6750. napi_disable(&tp->napi);
  6751. cancel_work_sync(&tp->reset_task);
  6752. netif_stop_queue(dev);
  6753. del_timer_sync(&tp->timer);
  6754. tg3_full_lock(tp, 1);
  6755. #if 0
  6756. tg3_dump_state(tp);
  6757. #endif
  6758. tg3_disable_ints(tp);
  6759. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  6760. tg3_free_rings(tp);
  6761. tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
  6762. tg3_full_unlock(tp);
  6763. free_irq(tp->pdev->irq, dev);
  6764. if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
  6765. pci_disable_msi(tp->pdev);
  6766. tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
  6767. }
  6768. memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
  6769. sizeof(tp->net_stats_prev));
  6770. memcpy(&tp->estats_prev, tg3_get_estats(tp),
  6771. sizeof(tp->estats_prev));
  6772. tg3_free_consistent(tp);
  6773. tg3_set_power_state(tp, PCI_D3hot);
  6774. netif_carrier_off(tp->dev);
  6775. return 0;
  6776. }
  6777. static inline unsigned long get_stat64(tg3_stat64_t *val)
  6778. {
  6779. unsigned long ret;
  6780. #if (BITS_PER_LONG == 32)
  6781. ret = val->low;
  6782. #else
  6783. ret = ((u64)val->high << 32) | ((u64)val->low);
  6784. #endif
  6785. return ret;
  6786. }
  6787. static inline u64 get_estat64(tg3_stat64_t *val)
  6788. {
  6789. return ((u64)val->high << 32) | ((u64)val->low);
  6790. }
  6791. static unsigned long calc_crc_errors(struct tg3 *tp)
  6792. {
  6793. struct tg3_hw_stats *hw_stats = tp->hw_stats;
  6794. if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
  6795. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  6796. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
  6797. u32 val;
  6798. spin_lock_bh(&tp->lock);
  6799. if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
  6800. tg3_writephy(tp, MII_TG3_TEST1,
  6801. val | MII_TG3_TEST1_CRC_EN);
  6802. tg3_readphy(tp, 0x14, &val);
  6803. } else
  6804. val = 0;
  6805. spin_unlock_bh(&tp->lock);
  6806. tp->phy_crc_errors += val;
  6807. return tp->phy_crc_errors;
  6808. }
  6809. return get_stat64(&hw_stats->rx_fcs_errors);
  6810. }
  6811. #define ESTAT_ADD(member) \
  6812. estats->member = old_estats->member + \
  6813. get_estat64(&hw_stats->member)
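/*
 * ESTAT_ADD() reports "snapshot at last close + counters since this open":
 * the hardware statistics do not appear to survive a chip halt, so
 * tg3_close() (above) saves the running totals into tp->estats_prev and
 * every later readout re-bases the live hardware counters on that snapshot.
 */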
  6814. static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
  6815. {
  6816. struct tg3_ethtool_stats *estats = &tp->estats;
  6817. struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
  6818. struct tg3_hw_stats *hw_stats = tp->hw_stats;
  6819. if (!hw_stats)
  6820. return old_estats;
  6821. ESTAT_ADD(rx_octets);
  6822. ESTAT_ADD(rx_fragments);
  6823. ESTAT_ADD(rx_ucast_packets);
  6824. ESTAT_ADD(rx_mcast_packets);
  6825. ESTAT_ADD(rx_bcast_packets);
  6826. ESTAT_ADD(rx_fcs_errors);
  6827. ESTAT_ADD(rx_align_errors);
  6828. ESTAT_ADD(rx_xon_pause_rcvd);
  6829. ESTAT_ADD(rx_xoff_pause_rcvd);
  6830. ESTAT_ADD(rx_mac_ctrl_rcvd);
  6831. ESTAT_ADD(rx_xoff_entered);
  6832. ESTAT_ADD(rx_frame_too_long_errors);
  6833. ESTAT_ADD(rx_jabbers);
  6834. ESTAT_ADD(rx_undersize_packets);
  6835. ESTAT_ADD(rx_in_length_errors);
  6836. ESTAT_ADD(rx_out_length_errors);
  6837. ESTAT_ADD(rx_64_or_less_octet_packets);
  6838. ESTAT_ADD(rx_65_to_127_octet_packets);
  6839. ESTAT_ADD(rx_128_to_255_octet_packets);
  6840. ESTAT_ADD(rx_256_to_511_octet_packets);
  6841. ESTAT_ADD(rx_512_to_1023_octet_packets);
  6842. ESTAT_ADD(rx_1024_to_1522_octet_packets);
  6843. ESTAT_ADD(rx_1523_to_2047_octet_packets);
  6844. ESTAT_ADD(rx_2048_to_4095_octet_packets);
  6845. ESTAT_ADD(rx_4096_to_8191_octet_packets);
  6846. ESTAT_ADD(rx_8192_to_9022_octet_packets);
  6847. ESTAT_ADD(tx_octets);
  6848. ESTAT_ADD(tx_collisions);
  6849. ESTAT_ADD(tx_xon_sent);
  6850. ESTAT_ADD(tx_xoff_sent);
  6851. ESTAT_ADD(tx_flow_control);
  6852. ESTAT_ADD(tx_mac_errors);
  6853. ESTAT_ADD(tx_single_collisions);
  6854. ESTAT_ADD(tx_mult_collisions);
  6855. ESTAT_ADD(tx_deferred);
  6856. ESTAT_ADD(tx_excessive_collisions);
  6857. ESTAT_ADD(tx_late_collisions);
  6858. ESTAT_ADD(tx_collide_2times);
  6859. ESTAT_ADD(tx_collide_3times);
  6860. ESTAT_ADD(tx_collide_4times);
  6861. ESTAT_ADD(tx_collide_5times);
  6862. ESTAT_ADD(tx_collide_6times);
  6863. ESTAT_ADD(tx_collide_7times);
  6864. ESTAT_ADD(tx_collide_8times);
  6865. ESTAT_ADD(tx_collide_9times);
  6866. ESTAT_ADD(tx_collide_10times);
  6867. ESTAT_ADD(tx_collide_11times);
  6868. ESTAT_ADD(tx_collide_12times);
  6869. ESTAT_ADD(tx_collide_13times);
  6870. ESTAT_ADD(tx_collide_14times);
  6871. ESTAT_ADD(tx_collide_15times);
  6872. ESTAT_ADD(tx_ucast_packets);
  6873. ESTAT_ADD(tx_mcast_packets);
  6874. ESTAT_ADD(tx_bcast_packets);
  6875. ESTAT_ADD(tx_carrier_sense_errors);
  6876. ESTAT_ADD(tx_discards);
  6877. ESTAT_ADD(tx_errors);
  6878. ESTAT_ADD(dma_writeq_full);
  6879. ESTAT_ADD(dma_write_prioq_full);
  6880. ESTAT_ADD(rxbds_empty);
  6881. ESTAT_ADD(rx_discards);
  6882. ESTAT_ADD(rx_errors);
  6883. ESTAT_ADD(rx_threshold_hit);
  6884. ESTAT_ADD(dma_readq_full);
  6885. ESTAT_ADD(dma_read_prioq_full);
  6886. ESTAT_ADD(tx_comp_queue_full);
  6887. ESTAT_ADD(ring_set_send_prod_index);
  6888. ESTAT_ADD(ring_status_update);
  6889. ESTAT_ADD(nic_irqs);
  6890. ESTAT_ADD(nic_avoided_irqs);
  6891. ESTAT_ADD(nic_tx_threshold_hit);
  6892. return estats;
  6893. }
  6894. static struct net_device_stats *tg3_get_stats(struct net_device *dev)
  6895. {
  6896. struct tg3 *tp = netdev_priv(dev);
  6897. struct net_device_stats *stats = &tp->net_stats;
  6898. struct net_device_stats *old_stats = &tp->net_stats_prev;
  6899. struct tg3_hw_stats *hw_stats = tp->hw_stats;
  6900. if (!hw_stats)
  6901. return old_stats;
  6902. stats->rx_packets = old_stats->rx_packets +
  6903. get_stat64(&hw_stats->rx_ucast_packets) +
  6904. get_stat64(&hw_stats->rx_mcast_packets) +
  6905. get_stat64(&hw_stats->rx_bcast_packets);
  6906. stats->tx_packets = old_stats->tx_packets +
  6907. get_stat64(&hw_stats->tx_ucast_packets) +
  6908. get_stat64(&hw_stats->tx_mcast_packets) +
  6909. get_stat64(&hw_stats->tx_bcast_packets);
  6910. stats->rx_bytes = old_stats->rx_bytes +
  6911. get_stat64(&hw_stats->rx_octets);
  6912. stats->tx_bytes = old_stats->tx_bytes +
  6913. get_stat64(&hw_stats->tx_octets);
  6914. stats->rx_errors = old_stats->rx_errors +
  6915. get_stat64(&hw_stats->rx_errors);
  6916. stats->tx_errors = old_stats->tx_errors +
  6917. get_stat64(&hw_stats->tx_errors) +
  6918. get_stat64(&hw_stats->tx_mac_errors) +
  6919. get_stat64(&hw_stats->tx_carrier_sense_errors) +
  6920. get_stat64(&hw_stats->tx_discards);
  6921. stats->multicast = old_stats->multicast +
  6922. get_stat64(&hw_stats->rx_mcast_packets);
  6923. stats->collisions = old_stats->collisions +
  6924. get_stat64(&hw_stats->tx_collisions);
  6925. stats->rx_length_errors = old_stats->rx_length_errors +
  6926. get_stat64(&hw_stats->rx_frame_too_long_errors) +
  6927. get_stat64(&hw_stats->rx_undersize_packets);
  6928. stats->rx_over_errors = old_stats->rx_over_errors +
  6929. get_stat64(&hw_stats->rxbds_empty);
  6930. stats->rx_frame_errors = old_stats->rx_frame_errors +
  6931. get_stat64(&hw_stats->rx_align_errors);
  6932. stats->tx_aborted_errors = old_stats->tx_aborted_errors +
  6933. get_stat64(&hw_stats->tx_discards);
  6934. stats->tx_carrier_errors = old_stats->tx_carrier_errors +
  6935. get_stat64(&hw_stats->tx_carrier_sense_errors);
  6936. stats->rx_crc_errors = old_stats->rx_crc_errors +
  6937. calc_crc_errors(tp);
  6938. stats->rx_missed_errors = old_stats->rx_missed_errors +
  6939. get_stat64(&hw_stats->rx_discards);
  6940. return stats;
  6941. }
  6942. static inline u32 calc_crc(unsigned char *buf, int len)
  6943. {
  6944. u32 reg;
  6945. u32 tmp;
  6946. int j, k;
  6947. reg = 0xffffffff;
  6948. for (j = 0; j < len; j++) {
  6949. reg ^= buf[j];
  6950. for (k = 0; k < 8; k++) {
  6951. tmp = reg & 0x01;
  6952. reg >>= 1;
  6953. if (tmp) {
  6954. reg ^= 0xedb88320;
  6955. }
  6956. }
  6957. }
  6958. return ~reg;
  6959. }
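/*
 * calc_crc() above is the standard bit-reversed (LSB-first) CRC-32 used by
 * Ethernet: polynomial 0x04C11DB7 processed with the reflected constant
 * 0xEDB88320, with the final value complemented.  It is used below only to
 * derive the multicast hash filter bits.
 */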
  6960. static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
  6961. {
  6962. /* accept or reject all multicast frames */
  6963. tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
  6964. tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
  6965. tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
  6966. tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
  6967. }
  6968. static void __tg3_set_rx_mode(struct net_device *dev)
  6969. {
  6970. struct tg3 *tp = netdev_priv(dev);
  6971. u32 rx_mode;
  6972. rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
  6973. RX_MODE_KEEP_VLAN_TAG);
  6974. /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
  6975. * flag clear.
  6976. */
  6977. #if TG3_VLAN_TAG_USED
  6978. if (!tp->vlgrp &&
  6979. !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
  6980. rx_mode |= RX_MODE_KEEP_VLAN_TAG;
  6981. #else
6982. /* By definition, VLAN is always disabled in this
  6983. * case.
  6984. */
  6985. if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
  6986. rx_mode |= RX_MODE_KEEP_VLAN_TAG;
  6987. #endif
  6988. if (dev->flags & IFF_PROMISC) {
  6989. /* Promiscuous mode. */
  6990. rx_mode |= RX_MODE_PROMISC;
  6991. } else if (dev->flags & IFF_ALLMULTI) {
  6992. /* Accept all multicast. */
  6993. tg3_set_multi (tp, 1);
  6994. } else if (dev->mc_count < 1) {
  6995. /* Reject all multicast. */
  6996. tg3_set_multi (tp, 0);
  6997. } else {
  6998. /* Accept one or more multicast(s). */
  6999. struct dev_mc_list *mclist;
  7000. unsigned int i;
  7001. u32 mc_filter[4] = { 0, };
  7002. u32 regidx;
  7003. u32 bit;
  7004. u32 crc;
  7005. for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
  7006. i++, mclist = mclist->next) {
  7007. crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
  7008. bit = ~crc & 0x7f;
  7009. regidx = (bit & 0x60) >> 5;
  7010. bit &= 0x1f;
  7011. mc_filter[regidx] |= (1 << bit);
  7012. }
  7013. tw32(MAC_HASH_REG_0, mc_filter[0]);
  7014. tw32(MAC_HASH_REG_1, mc_filter[1]);
  7015. tw32(MAC_HASH_REG_2, mc_filter[2]);
  7016. tw32(MAC_HASH_REG_3, mc_filter[3]);
  7017. }
  7018. if (rx_mode != tp->rx_mode) {
  7019. tp->rx_mode = rx_mode;
  7020. tw32_f(MAC_RX_MODE, rx_mode);
  7021. udelay(10);
  7022. }
  7023. }
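/*
 * Worked example of the multicast hash mapping above (value illustrative):
 * the four MAC_HASH_REG_x registers form what is effectively a 128-bit hash
 * filter.  For a destination address whose CRC-32 gives (~crc & 0x7f) == 0x6b:
 *
 *   regidx = (0x6b & 0x60) >> 5 = 3;     // MAC_HASH_REG_3
 *   bit    =  0x6b & 0x1f       = 11;    // bit 11 of that register
 *
 * so mc_filter[3] |= (1 << 11) before the four registers are written back.
 */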
  7024. static void tg3_set_rx_mode(struct net_device *dev)
  7025. {
  7026. struct tg3 *tp = netdev_priv(dev);
  7027. if (!netif_running(dev))
  7028. return;
  7029. tg3_full_lock(tp, 0);
  7030. __tg3_set_rx_mode(dev);
  7031. tg3_full_unlock(tp);
  7032. }
  7033. #define TG3_REGDUMP_LEN (32 * 1024)
  7034. static int tg3_get_regs_len(struct net_device *dev)
  7035. {
  7036. return TG3_REGDUMP_LEN;
  7037. }
  7038. static void tg3_get_regs(struct net_device *dev,
  7039. struct ethtool_regs *regs, void *_p)
  7040. {
  7041. u32 *p = _p;
  7042. struct tg3 *tp = netdev_priv(dev);
  7043. u8 *orig_p = _p;
  7044. int i;
  7045. regs->version = 0;
  7046. memset(p, 0, TG3_REGDUMP_LEN);
  7047. if (tp->link_config.phy_is_low_power)
  7048. return;
  7049. tg3_full_lock(tp, 0);
  7050. #define __GET_REG32(reg) (*(p)++ = tr32(reg))
  7051. #define GET_REG32_LOOP(base,len) \
  7052. do { p = (u32 *)(orig_p + (base)); \
  7053. for (i = 0; i < len; i += 4) \
  7054. __GET_REG32((base) + i); \
  7055. } while (0)
  7056. #define GET_REG32_1(reg) \
  7057. do { p = (u32 *)(orig_p + (reg)); \
  7058. __GET_REG32((reg)); \
  7059. } while (0)
  7060. GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
  7061. GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
  7062. GET_REG32_LOOP(MAC_MODE, 0x4f0);
  7063. GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
  7064. GET_REG32_1(SNDDATAC_MODE);
  7065. GET_REG32_LOOP(SNDBDS_MODE, 0x80);
  7066. GET_REG32_LOOP(SNDBDI_MODE, 0x48);
  7067. GET_REG32_1(SNDBDC_MODE);
  7068. GET_REG32_LOOP(RCVLPC_MODE, 0x20);
  7069. GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
  7070. GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
  7071. GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
  7072. GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
  7073. GET_REG32_1(RCVDCC_MODE);
  7074. GET_REG32_LOOP(RCVBDI_MODE, 0x20);
  7075. GET_REG32_LOOP(RCVCC_MODE, 0x14);
  7076. GET_REG32_LOOP(RCVLSC_MODE, 0x08);
  7077. GET_REG32_1(MBFREE_MODE);
  7078. GET_REG32_LOOP(HOSTCC_MODE, 0x100);
  7079. GET_REG32_LOOP(MEMARB_MODE, 0x10);
  7080. GET_REG32_LOOP(BUFMGR_MODE, 0x58);
  7081. GET_REG32_LOOP(RDMAC_MODE, 0x08);
  7082. GET_REG32_LOOP(WDMAC_MODE, 0x08);
  7083. GET_REG32_1(RX_CPU_MODE);
  7084. GET_REG32_1(RX_CPU_STATE);
  7085. GET_REG32_1(RX_CPU_PGMCTR);
  7086. GET_REG32_1(RX_CPU_HWBKPT);
  7087. GET_REG32_1(TX_CPU_MODE);
  7088. GET_REG32_1(TX_CPU_STATE);
  7089. GET_REG32_1(TX_CPU_PGMCTR);
  7090. GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
  7091. GET_REG32_LOOP(FTQ_RESET, 0x120);
  7092. GET_REG32_LOOP(MSGINT_MODE, 0x0c);
  7093. GET_REG32_1(DMAC_MODE);
  7094. GET_REG32_LOOP(GRC_MODE, 0x4c);
  7095. if (tp->tg3_flags & TG3_FLAG_NVRAM)
  7096. GET_REG32_LOOP(NVRAM_CMD, 0x24);
  7097. #undef __GET_REG32
  7098. #undef GET_REG32_LOOP
  7099. #undef GET_REG32_1
  7100. tg3_full_unlock(tp);
  7101. }
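/*
 * Layout note for tg3_get_regs() above: GET_REG32_LOOP()/GET_REG32_1() copy
 * each register block to the same byte offset inside the 32 kB dump buffer
 * (p is re-aimed at orig_p + base each time), so regions that are never read
 * simply stay zero from the initial memset().  A consumer of the ethtool
 * dump can therefore index it directly by register offset.
 */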
  7102. static int tg3_get_eeprom_len(struct net_device *dev)
  7103. {
  7104. struct tg3 *tp = netdev_priv(dev);
  7105. return tp->nvram_size;
  7106. }
  7107. static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
  7108. {
  7109. struct tg3 *tp = netdev_priv(dev);
  7110. int ret;
  7111. u8 *pd;
  7112. u32 i, offset, len, b_offset, b_count;
  7113. __le32 val;
  7114. if (tp->link_config.phy_is_low_power)
  7115. return -EAGAIN;
  7116. offset = eeprom->offset;
  7117. len = eeprom->len;
  7118. eeprom->len = 0;
  7119. eeprom->magic = TG3_EEPROM_MAGIC;
  7120. if (offset & 3) {
  7121. /* adjustments to start on required 4 byte boundary */
  7122. b_offset = offset & 3;
  7123. b_count = 4 - b_offset;
  7124. if (b_count > len) {
  7125. /* i.e. offset=1 len=2 */
  7126. b_count = len;
  7127. }
  7128. ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
  7129. if (ret)
  7130. return ret;
  7131. memcpy(data, ((char*)&val) + b_offset, b_count);
  7132. len -= b_count;
  7133. offset += b_count;
  7134. eeprom->len += b_count;
  7135. }
7136. /* read bytes up to the last 4-byte boundary */
  7137. pd = &data[eeprom->len];
  7138. for (i = 0; i < (len - (len & 3)); i += 4) {
  7139. ret = tg3_nvram_read_le(tp, offset + i, &val);
  7140. if (ret) {
  7141. eeprom->len += i;
  7142. return ret;
  7143. }
  7144. memcpy(pd + i, &val, 4);
  7145. }
  7146. eeprom->len += i;
  7147. if (len & 3) {
  7148. /* read last bytes not ending on 4 byte boundary */
  7149. pd = &data[eeprom->len];
  7150. b_count = len & 3;
  7151. b_offset = offset + len - b_count;
  7152. ret = tg3_nvram_read_le(tp, b_offset, &val);
  7153. if (ret)
  7154. return ret;
  7155. memcpy(pd, &val, b_count);
  7156. eeprom->len += b_count;
  7157. }
  7158. return 0;
  7159. }
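/*
 * Alignment handling in tg3_get_eeprom() above, using the "offset=1 len=2"
 * example from the code: b_offset = 1 and b_count = min(4 - 1, len) = 2, so
 * the aligned word at NVRAM offset 0 is read and bytes 1..2 of it are copied
 * out; len then drops to 0, so neither the aligned middle loop nor the
 * trailing partial-word read runs.
 */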
  7160. static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
  7161. static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
  7162. {
  7163. struct tg3 *tp = netdev_priv(dev);
  7164. int ret;
  7165. u32 offset, len, b_offset, odd_len;
  7166. u8 *buf;
  7167. __le32 start, end;
  7168. if (tp->link_config.phy_is_low_power)
  7169. return -EAGAIN;
  7170. if (eeprom->magic != TG3_EEPROM_MAGIC)
  7171. return -EINVAL;
  7172. offset = eeprom->offset;
  7173. len = eeprom->len;
  7174. if ((b_offset = (offset & 3))) {
  7175. /* adjustments to start on required 4 byte boundary */
  7176. ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
  7177. if (ret)
  7178. return ret;
  7179. len += b_offset;
  7180. offset &= ~3;
  7181. if (len < 4)
  7182. len = 4;
  7183. }
  7184. odd_len = 0;
  7185. if (len & 3) {
  7186. /* adjustments to end on required 4 byte boundary */
  7187. odd_len = 1;
  7188. len = (len + 3) & ~3;
  7189. ret = tg3_nvram_read_le(tp, offset+len-4, &end);
  7190. if (ret)
  7191. return ret;
  7192. }
  7193. buf = data;
  7194. if (b_offset || odd_len) {
  7195. buf = kmalloc(len, GFP_KERNEL);
  7196. if (!buf)
  7197. return -ENOMEM;
  7198. if (b_offset)
  7199. memcpy(buf, &start, 4);
  7200. if (odd_len)
  7201. memcpy(buf+len-4, &end, 4);
  7202. memcpy(buf + b_offset, data, eeprom->len);
  7203. }
  7204. ret = tg3_nvram_write_block(tp, offset, len, buf);
  7205. if (buf != data)
  7206. kfree(buf);
  7207. return ret;
  7208. }
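/*
 * tg3_set_eeprom() above performs a read-modify-write for unaligned
 * requests: if the start or end of the region is not on a 4-byte boundary,
 * the bounding NVRAM words are read into 'start'/'end', a word-aligned
 * bounce buffer is built around the caller's data, and the whole padded
 * region is handed to tg3_nvram_write_block() in a single call.
 */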
  7209. static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  7210. {
  7211. struct tg3 *tp = netdev_priv(dev);
  7212. if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
  7213. if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
  7214. return -EAGAIN;
  7215. return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
  7216. }
  7217. cmd->supported = (SUPPORTED_Autoneg);
  7218. if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
  7219. cmd->supported |= (SUPPORTED_1000baseT_Half |
  7220. SUPPORTED_1000baseT_Full);
  7221. if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
  7222. cmd->supported |= (SUPPORTED_100baseT_Half |
  7223. SUPPORTED_100baseT_Full |
  7224. SUPPORTED_10baseT_Half |
  7225. SUPPORTED_10baseT_Full |
  7226. SUPPORTED_TP);
  7227. cmd->port = PORT_TP;
  7228. } else {
  7229. cmd->supported |= SUPPORTED_FIBRE;
  7230. cmd->port = PORT_FIBRE;
  7231. }
  7232. cmd->advertising = tp->link_config.advertising;
  7233. if (netif_running(dev)) {
  7234. cmd->speed = tp->link_config.active_speed;
  7235. cmd->duplex = tp->link_config.active_duplex;
  7236. }
  7237. cmd->phy_address = PHY_ADDR;
  7238. cmd->transceiver = XCVR_INTERNAL;
  7239. cmd->autoneg = tp->link_config.autoneg;
  7240. cmd->maxtxpkt = 0;
  7241. cmd->maxrxpkt = 0;
  7242. return 0;
  7243. }
  7244. static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  7245. {
  7246. struct tg3 *tp = netdev_priv(dev);
  7247. if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
  7248. if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
  7249. return -EAGAIN;
  7250. return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
  7251. }
  7252. if (cmd->autoneg != AUTONEG_ENABLE &&
  7253. cmd->autoneg != AUTONEG_DISABLE)
  7254. return -EINVAL;
  7255. if (cmd->autoneg == AUTONEG_DISABLE &&
  7256. cmd->duplex != DUPLEX_FULL &&
  7257. cmd->duplex != DUPLEX_HALF)
  7258. return -EINVAL;
  7259. if (cmd->autoneg == AUTONEG_ENABLE) {
  7260. u32 mask = ADVERTISED_Autoneg |
  7261. ADVERTISED_Pause |
  7262. ADVERTISED_Asym_Pause;
7263. if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
  7264. mask |= ADVERTISED_1000baseT_Half |
  7265. ADVERTISED_1000baseT_Full;
  7266. if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
  7267. mask |= ADVERTISED_100baseT_Half |
  7268. ADVERTISED_100baseT_Full |
  7269. ADVERTISED_10baseT_Half |
  7270. ADVERTISED_10baseT_Full |
  7271. ADVERTISED_TP;
  7272. else
  7273. mask |= ADVERTISED_FIBRE;
  7274. if (cmd->advertising & ~mask)
  7275. return -EINVAL;
  7276. mask &= (ADVERTISED_1000baseT_Half |
  7277. ADVERTISED_1000baseT_Full |
  7278. ADVERTISED_100baseT_Half |
  7279. ADVERTISED_100baseT_Full |
  7280. ADVERTISED_10baseT_Half |
  7281. ADVERTISED_10baseT_Full);
  7282. cmd->advertising &= mask;
  7283. } else {
  7284. if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
  7285. if (cmd->speed != SPEED_1000)
  7286. return -EINVAL;
  7287. if (cmd->duplex != DUPLEX_FULL)
  7288. return -EINVAL;
  7289. } else {
  7290. if (cmd->speed != SPEED_100 &&
  7291. cmd->speed != SPEED_10)
  7292. return -EINVAL;
  7293. }
  7294. }
  7295. tg3_full_lock(tp, 0);
  7296. tp->link_config.autoneg = cmd->autoneg;
  7297. if (cmd->autoneg == AUTONEG_ENABLE) {
  7298. tp->link_config.advertising = (cmd->advertising |
  7299. ADVERTISED_Autoneg);
  7300. tp->link_config.speed = SPEED_INVALID;
  7301. tp->link_config.duplex = DUPLEX_INVALID;
  7302. } else {
  7303. tp->link_config.advertising = 0;
  7304. tp->link_config.speed = cmd->speed;
  7305. tp->link_config.duplex = cmd->duplex;
  7306. }
  7307. tp->link_config.orig_speed = tp->link_config.speed;
  7308. tp->link_config.orig_duplex = tp->link_config.duplex;
  7309. tp->link_config.orig_autoneg = tp->link_config.autoneg;
  7310. if (netif_running(dev))
  7311. tg3_setup_phy(tp, 1);
  7312. tg3_full_unlock(tp);
  7313. return 0;
  7314. }
  7315. static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  7316. {
  7317. struct tg3 *tp = netdev_priv(dev);
  7318. strcpy(info->driver, DRV_MODULE_NAME);
  7319. strcpy(info->version, DRV_MODULE_VERSION);
  7320. strcpy(info->fw_version, tp->fw_ver);
  7321. strcpy(info->bus_info, pci_name(tp->pdev));
  7322. }
  7323. static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  7324. {
  7325. struct tg3 *tp = netdev_priv(dev);
  7326. if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
  7327. device_can_wakeup(&tp->pdev->dev))
  7328. wol->supported = WAKE_MAGIC;
  7329. else
  7330. wol->supported = 0;
  7331. wol->wolopts = 0;
  7332. if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
  7333. device_can_wakeup(&tp->pdev->dev))
  7334. wol->wolopts = WAKE_MAGIC;
  7335. memset(&wol->sopass, 0, sizeof(wol->sopass));
  7336. }
  7337. static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  7338. {
  7339. struct tg3 *tp = netdev_priv(dev);
  7340. struct device *dp = &tp->pdev->dev;
  7341. if (wol->wolopts & ~WAKE_MAGIC)
  7342. return -EINVAL;
  7343. if ((wol->wolopts & WAKE_MAGIC) &&
  7344. !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
  7345. return -EINVAL;
  7346. spin_lock_bh(&tp->lock);
  7347. if (wol->wolopts & WAKE_MAGIC) {
  7348. tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
  7349. device_set_wakeup_enable(dp, true);
  7350. } else {
  7351. tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
  7352. device_set_wakeup_enable(dp, false);
  7353. }
  7354. spin_unlock_bh(&tp->lock);
  7355. return 0;
  7356. }
  7357. static u32 tg3_get_msglevel(struct net_device *dev)
  7358. {
  7359. struct tg3 *tp = netdev_priv(dev);
  7360. return tp->msg_enable;
  7361. }
  7362. static void tg3_set_msglevel(struct net_device *dev, u32 value)
  7363. {
  7364. struct tg3 *tp = netdev_priv(dev);
  7365. tp->msg_enable = value;
  7366. }
  7367. static int tg3_set_tso(struct net_device *dev, u32 value)
  7368. {
  7369. struct tg3 *tp = netdev_priv(dev);
  7370. if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
  7371. if (value)
  7372. return -EINVAL;
  7373. return 0;
  7374. }
  7375. if ((dev->features & NETIF_F_IPV6_CSUM) &&
  7376. (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
  7377. if (value) {
  7378. dev->features |= NETIF_F_TSO6;
  7379. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  7380. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
  7381. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
  7382. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  7383. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  7384. dev->features |= NETIF_F_TSO_ECN;
  7385. } else
  7386. dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
  7387. }
  7388. return ethtool_op_set_tso(dev, value);
  7389. }
  7390. static int tg3_nway_reset(struct net_device *dev)
  7391. {
  7392. struct tg3 *tp = netdev_priv(dev);
  7393. int r;
  7394. if (!netif_running(dev))
  7395. return -EAGAIN;
  7396. if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
  7397. return -EINVAL;
  7398. if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
  7399. if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
  7400. return -EAGAIN;
  7401. r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
  7402. } else {
  7403. u32 bmcr;
  7404. spin_lock_bh(&tp->lock);
  7405. r = -EINVAL;
  7406. tg3_readphy(tp, MII_BMCR, &bmcr);
  7407. if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
  7408. ((bmcr & BMCR_ANENABLE) ||
  7409. (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
  7410. tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
  7411. BMCR_ANENABLE);
  7412. r = 0;
  7413. }
  7414. spin_unlock_bh(&tp->lock);
  7415. }
  7416. return r;
  7417. }
  7418. static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
  7419. {
  7420. struct tg3 *tp = netdev_priv(dev);
  7421. ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
  7422. ering->rx_mini_max_pending = 0;
  7423. if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
  7424. ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
  7425. else
  7426. ering->rx_jumbo_max_pending = 0;
  7427. ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
  7428. ering->rx_pending = tp->rx_pending;
  7429. ering->rx_mini_pending = 0;
  7430. if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
  7431. ering->rx_jumbo_pending = tp->rx_jumbo_pending;
  7432. else
  7433. ering->rx_jumbo_pending = 0;
  7434. ering->tx_pending = tp->tx_pending;
  7435. }
  7436. static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
  7437. {
  7438. struct tg3 *tp = netdev_priv(dev);
  7439. int irq_sync = 0, err = 0;
  7440. if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
  7441. (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
  7442. (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
  7443. (ering->tx_pending <= MAX_SKB_FRAGS) ||
  7444. ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
  7445. (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
  7446. return -EINVAL;
  7447. if (netif_running(dev)) {
  7448. tg3_phy_stop(tp);
  7449. tg3_netif_stop(tp);
  7450. irq_sync = 1;
  7451. }
  7452. tg3_full_lock(tp, irq_sync);
  7453. tp->rx_pending = ering->rx_pending;
  7454. if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
  7455. tp->rx_pending > 63)
  7456. tp->rx_pending = 63;
  7457. tp->rx_jumbo_pending = ering->rx_jumbo_pending;
  7458. tp->tx_pending = ering->tx_pending;
  7459. if (netif_running(dev)) {
  7460. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  7461. err = tg3_restart_hw(tp, 1);
  7462. if (!err)
  7463. tg3_netif_start(tp);
  7464. }
  7465. tg3_full_unlock(tp);
  7466. if (irq_sync && !err)
  7467. tg3_phy_start(tp);
  7468. return err;
  7469. }
  7470. static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
  7471. {
  7472. struct tg3 *tp = netdev_priv(dev);
  7473. epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
  7474. if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
  7475. epause->rx_pause = 1;
  7476. else
  7477. epause->rx_pause = 0;
  7478. if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
  7479. epause->tx_pause = 1;
  7480. else
  7481. epause->tx_pause = 0;
  7482. }
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}

static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	spin_lock_bh(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_bh(&tp->lock);

	return 0;
}

static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);
		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}

#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
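
/* NVRAM self-test support.  The sizes below give how many bytes of the
 * image are covered by the integrity check: 0x100 bytes for a legacy
 * TG3_EEPROM_MAGIC image, a format/revision dependent length for selfboot
 * firmware images (which use a simple byte checksum), and 0x20 bytes for
 * hardware selfboot images, which carry per-byte parity bits instead of a
 * checksum.
 */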
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC 2
#define TG3_COPPER_TIMEOUT_SEC 6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
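
/* The register test below walks a table of { offset, flags, read_mask,
 * write_mask } entries.  For each register it saves the original value,
 * writes all-zeros and then all-ones, and checks that the read-only bits
 * (read_mask) never change while the read/write bits (write_mask) take the
 * written value.  The flags select which entries apply to which chip
 * families (5705/5750/5788).
 */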
  7793. /* Only test the commonly used registers */
  7794. static int tg3_test_registers(struct tg3 *tp)
  7795. {
  7796. int i, is_5705, is_5750;
  7797. u32 offset, read_mask, write_mask, val, save_val, read_val;
  7798. static struct {
  7799. u16 offset;
  7800. u16 flags;
  7801. #define TG3_FL_5705 0x1
  7802. #define TG3_FL_NOT_5705 0x2
  7803. #define TG3_FL_NOT_5788 0x4
  7804. #define TG3_FL_NOT_5750 0x8
  7805. u32 read_mask;
  7806. u32 write_mask;
  7807. } reg_tbl[] = {
  7808. /* MAC Control Registers */
  7809. { MAC_MODE, TG3_FL_NOT_5705,
  7810. 0x00000000, 0x00ef6f8c },
  7811. { MAC_MODE, TG3_FL_5705,
  7812. 0x00000000, 0x01ef6b8c },
  7813. { MAC_STATUS, TG3_FL_NOT_5705,
  7814. 0x03800107, 0x00000000 },
  7815. { MAC_STATUS, TG3_FL_5705,
  7816. 0x03800100, 0x00000000 },
  7817. { MAC_ADDR_0_HIGH, 0x0000,
  7818. 0x00000000, 0x0000ffff },
  7819. { MAC_ADDR_0_LOW, 0x0000,
  7820. 0x00000000, 0xffffffff },
  7821. { MAC_RX_MTU_SIZE, 0x0000,
  7822. 0x00000000, 0x0000ffff },
  7823. { MAC_TX_MODE, 0x0000,
  7824. 0x00000000, 0x00000070 },
  7825. { MAC_TX_LENGTHS, 0x0000,
  7826. 0x00000000, 0x00003fff },
  7827. { MAC_RX_MODE, TG3_FL_NOT_5705,
  7828. 0x00000000, 0x000007fc },
  7829. { MAC_RX_MODE, TG3_FL_5705,
  7830. 0x00000000, 0x000007dc },
  7831. { MAC_HASH_REG_0, 0x0000,
  7832. 0x00000000, 0xffffffff },
  7833. { MAC_HASH_REG_1, 0x0000,
  7834. 0x00000000, 0xffffffff },
  7835. { MAC_HASH_REG_2, 0x0000,
  7836. 0x00000000, 0xffffffff },
  7837. { MAC_HASH_REG_3, 0x0000,
  7838. 0x00000000, 0xffffffff },
  7839. /* Receive Data and Receive BD Initiator Control Registers. */
  7840. { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
  7841. 0x00000000, 0xffffffff },
  7842. { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
  7843. 0x00000000, 0xffffffff },
  7844. { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
  7845. 0x00000000, 0x00000003 },
  7846. { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
  7847. 0x00000000, 0xffffffff },
  7848. { RCVDBDI_STD_BD+0, 0x0000,
  7849. 0x00000000, 0xffffffff },
  7850. { RCVDBDI_STD_BD+4, 0x0000,
  7851. 0x00000000, 0xffffffff },
  7852. { RCVDBDI_STD_BD+8, 0x0000,
  7853. 0x00000000, 0xffff0002 },
  7854. { RCVDBDI_STD_BD+0xc, 0x0000,
  7855. 0x00000000, 0xffffffff },
  7856. /* Receive BD Initiator Control Registers. */
  7857. { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
  7858. 0x00000000, 0xffffffff },
  7859. { RCVBDI_STD_THRESH, TG3_FL_5705,
  7860. 0x00000000, 0x000003ff },
  7861. { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
  7862. 0x00000000, 0xffffffff },
  7863. /* Host Coalescing Control Registers. */
  7864. { HOSTCC_MODE, TG3_FL_NOT_5705,
  7865. 0x00000000, 0x00000004 },
  7866. { HOSTCC_MODE, TG3_FL_5705,
  7867. 0x00000000, 0x000000f6 },
  7868. { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
  7869. 0x00000000, 0xffffffff },
  7870. { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
  7871. 0x00000000, 0x000003ff },
  7872. { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
  7873. 0x00000000, 0xffffffff },
  7874. { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
  7875. 0x00000000, 0x000003ff },
  7876. { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
  7877. 0x00000000, 0xffffffff },
  7878. { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
  7879. 0x00000000, 0x000000ff },
  7880. { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
  7881. 0x00000000, 0xffffffff },
  7882. { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
  7883. 0x00000000, 0x000000ff },
  7884. { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
  7885. 0x00000000, 0xffffffff },
  7886. { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
  7887. 0x00000000, 0xffffffff },
  7888. { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
  7889. 0x00000000, 0xffffffff },
  7890. { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
  7891. 0x00000000, 0x000000ff },
  7892. { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
  7893. 0x00000000, 0xffffffff },
  7894. { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
  7895. 0x00000000, 0x000000ff },
  7896. { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
  7897. 0x00000000, 0xffffffff },
  7898. { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
  7899. 0x00000000, 0xffffffff },
  7900. { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
  7901. 0x00000000, 0xffffffff },
  7902. { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
  7903. 0x00000000, 0xffffffff },
  7904. { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
  7905. 0x00000000, 0xffffffff },
  7906. { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
  7907. 0xffffffff, 0x00000000 },
  7908. { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
  7909. 0xffffffff, 0x00000000 },
  7910. /* Buffer Manager Control Registers. */
  7911. { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
  7912. 0x00000000, 0x007fff80 },
  7913. { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
  7914. 0x00000000, 0x007fffff },
  7915. { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
  7916. 0x00000000, 0x0000003f },
  7917. { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
  7918. 0x00000000, 0x000001ff },
  7919. { BUFMGR_MB_HIGH_WATER, 0x0000,
  7920. 0x00000000, 0x000001ff },
  7921. { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
  7922. 0xffffffff, 0x00000000 },
  7923. { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
  7924. 0xffffffff, 0x00000000 },
  7925. /* Mailbox Registers */
  7926. { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
  7927. 0x00000000, 0x000001ff },
  7928. { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
  7929. 0x00000000, 0x000001ff },
  7930. { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
  7931. 0x00000000, 0x000007ff },
  7932. { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
  7933. 0x00000000, 0x000001ff },
  7934. { 0xffff, 0x0000, 0x00000000, 0x00000000 },
  7935. };
  7936. is_5705 = is_5750 = 0;
  7937. if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
  7938. is_5705 = 1;
  7939. if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
  7940. is_5750 = 1;
  7941. }
  7942. for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
  7943. if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
  7944. continue;
  7945. if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
  7946. continue;
  7947. if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
  7948. (reg_tbl[i].flags & TG3_FL_NOT_5788))
  7949. continue;
  7950. if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
  7951. continue;
  7952. offset = (u32) reg_tbl[i].offset;
  7953. read_mask = reg_tbl[i].read_mask;
  7954. write_mask = reg_tbl[i].write_mask;
  7955. /* Save the original register content */
  7956. save_val = tr32(offset);
  7957. /* Determine the read-only value. */
  7958. read_val = save_val & read_mask;
  7959. /* Write zero to the register, then make sure the read-only bits
  7960. * are not changed and the read/write bits are all zeros.
  7961. */
  7962. tw32(offset, 0);
  7963. val = tr32(offset);
  7964. /* Test the read-only and read/write bits. */
  7965. if (((val & read_mask) != read_val) || (val & write_mask))
  7966. goto out;
  7967. /* Write ones to all the bits defined by RdMask and WrMask, then
  7968. * make sure the read-only bits are not changed and the
  7969. * read/write bits are all ones.
  7970. */
  7971. tw32(offset, read_mask | write_mask);
  7972. val = tr32(offset);
  7973. /* Test the read-only bits. */
  7974. if ((val & read_mask) != read_val)
  7975. goto out;
  7976. /* Test the read/write bits. */
  7977. if ((val & write_mask) != write_mask)
  7978. goto out;
  7979. tw32(offset, save_val);
  7980. }
  7981. return 0;
  7982. out:
  7983. if (netif_msg_hw(tp))
  7984. printk(KERN_ERR PFX "Register test failed at offset %x\n",
  7985. offset);
  7986. tw32(offset, save_val);
  7987. return -EIO;
  7988. }
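
/* Internal memory test: tg3_do_mem_test() writes three patterns (all zeros,
 * all ones, 0xaa55a55a) through every word of a window and reads them back;
 * tg3_test_memory() applies it to a per-chip table of SRAM regions,
 * terminated by an offset of 0xffffffff.
 */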
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
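
/* Loopback test.  tg3_run_loopback() puts either the MAC (internal
 * loopback) or the PHY (BMCR_LOOPBACK) into loopback mode, transmits a
 * single 1514-byte frame filled with a known byte pattern, then polls the
 * status block until the tx consumer and rx producer indices advance and
 * verifies the received frame contents against what was sent.
 */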
  8055. #define TG3_MAC_LOOPBACK 0
  8056. #define TG3_PHY_LOOPBACK 1
  8057. static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
  8058. {
  8059. u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
  8060. u32 desc_idx;
  8061. struct sk_buff *skb, *rx_skb;
  8062. u8 *tx_data;
  8063. dma_addr_t map;
  8064. int num_pkts, tx_len, rx_len, i, err;
  8065. struct tg3_rx_buffer_desc *desc;
  8066. if (loopback_mode == TG3_MAC_LOOPBACK) {
  8067. /* HW errata - mac loopback fails in some cases on 5780.
  8068. * Normal traffic and PHY loopback are not affected by
  8069. * errata.
  8070. */
  8071. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
  8072. return 0;
  8073. mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
  8074. MAC_MODE_PORT_INT_LPBACK;
  8075. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
  8076. mac_mode |= MAC_MODE_LINK_POLARITY;
  8077. if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
  8078. mac_mode |= MAC_MODE_PORT_MODE_MII;
  8079. else
  8080. mac_mode |= MAC_MODE_PORT_MODE_GMII;
  8081. tw32(MAC_MODE, mac_mode);
  8082. } else if (loopback_mode == TG3_PHY_LOOPBACK) {
  8083. u32 val;
  8084. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  8085. u32 phytest;
  8086. if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
  8087. u32 phy;
  8088. tg3_writephy(tp, MII_TG3_EPHY_TEST,
  8089. phytest | MII_TG3_EPHY_SHADOW_EN);
  8090. if (!tg3_readphy(tp, 0x1b, &phy))
  8091. tg3_writephy(tp, 0x1b, phy & ~0x20);
  8092. tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
  8093. }
  8094. val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
  8095. } else
  8096. val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
  8097. tg3_phy_toggle_automdix(tp, 0);
  8098. tg3_writephy(tp, MII_BMCR, val);
  8099. udelay(40);
  8100. mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
  8101. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  8102. tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
  8103. mac_mode |= MAC_MODE_PORT_MODE_MII;
  8104. } else
  8105. mac_mode |= MAC_MODE_PORT_MODE_GMII;
  8106. /* reset to prevent losing 1st rx packet intermittently */
  8107. if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
  8108. tw32_f(MAC_RX_MODE, RX_MODE_RESET);
  8109. udelay(10);
  8110. tw32_f(MAC_RX_MODE, tp->rx_mode);
  8111. }
  8112. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
  8113. if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
  8114. mac_mode &= ~MAC_MODE_LINK_POLARITY;
  8115. else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
  8116. mac_mode |= MAC_MODE_LINK_POLARITY;
  8117. tg3_writephy(tp, MII_TG3_EXT_CTRL,
  8118. MII_TG3_EXT_CTRL_LNK3_LED_MODE);
  8119. }
  8120. tw32(MAC_MODE, mac_mode);
  8121. }
  8122. else
  8123. return -EINVAL;
  8124. err = -EIO;
  8125. tx_len = 1514;
  8126. skb = netdev_alloc_skb(tp->dev, tx_len);
  8127. if (!skb)
  8128. return -ENOMEM;
  8129. tx_data = skb_put(skb, tx_len);
  8130. memcpy(tx_data, tp->dev->dev_addr, 6);
  8131. memset(tx_data + 6, 0x0, 8);
  8132. tw32(MAC_RX_MTU_SIZE, tx_len + 4);
  8133. for (i = 14; i < tx_len; i++)
  8134. tx_data[i] = (u8) (i & 0xff);
  8135. map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
  8136. tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
  8137. HOSTCC_MODE_NOW);
  8138. udelay(10);
  8139. rx_start_idx = tp->hw_status->idx[0].rx_producer;
  8140. num_pkts = 0;
  8141. tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
  8142. tp->tx_prod++;
  8143. num_pkts++;
  8144. tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
  8145. tp->tx_prod);
  8146. tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
  8147. udelay(10);
  8148. /* 250 usec to allow enough time on some 10/100 Mbps devices. */
  8149. for (i = 0; i < 25; i++) {
  8150. tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
  8151. HOSTCC_MODE_NOW);
  8152. udelay(10);
  8153. tx_idx = tp->hw_status->idx[0].tx_consumer;
  8154. rx_idx = tp->hw_status->idx[0].rx_producer;
  8155. if ((tx_idx == tp->tx_prod) &&
  8156. (rx_idx == (rx_start_idx + num_pkts)))
  8157. break;
  8158. }
  8159. pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
  8160. dev_kfree_skb(skb);
  8161. if (tx_idx != tp->tx_prod)
  8162. goto out;
  8163. if (rx_idx != rx_start_idx + num_pkts)
  8164. goto out;
  8165. desc = &tp->rx_rcb[rx_start_idx];
  8166. desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
  8167. opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
  8168. if (opaque_key != RXD_OPAQUE_RING_STD)
  8169. goto out;
  8170. if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
  8171. (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
  8172. goto out;
  8173. rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
  8174. if (rx_len != tx_len)
  8175. goto out;
  8176. rx_skb = tp->rx_std_buffers[desc_idx].skb;
  8177. map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
  8178. pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
  8179. for (i = 14; i < tx_len; i++) {
  8180. if (*(rx_skb->data + i) != (u8) (i & 0xff))
  8181. goto out;
  8182. }
  8183. err = 0;
  8184. /* tg3_free_rings will unmap and free the rx_skb */
  8185. out:
  8186. return err;
  8187. }
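
/* tg3_test_loopback() returns a bitmask of the failed loopback modes
 * (MAC and/or PHY).  It resets the hardware first and temporarily disables
 * GPHY auto-powerdown and, on CPMU-equipped chips, link-aware power
 * management while the test runs.
 */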
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED | \
					 TG3_PHY_LOOPBACK_FAILED)

static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	/* Turn off gphy autopowerdown. */
	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

	return err;
}
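
/* ethtool self-test entry point.  Result slots: data[0] NVRAM, data[1] link,
 * data[2] registers, data[3] memory, data[4] loopback, data[5] interrupt.
 * The register/memory/loopback/interrupt tests only run for an offline test
 * and require halting and restarting the hardware.
 */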
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);
}
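
/* MII ioctl handler.  With phylib in charge the request is forwarded to
 * phy_mii_ioctl(); otherwise SIOCGMIIREG/SIOCSMIIREG go through
 * tg3_readphy()/tg3_writephy() under tp->lock, and both are rejected for
 * SerDes devices or while the PHY is in low-power state.
 */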
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
#if TG3_VLAN_TAG_USED
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev)) {
		tp->vlgrp = grp;
		return;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif

static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
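
/* tg3_set_coalesce() bounds-checks the requested interrupt coalescing
 * parameters (the _irq and stats-block limits are only non-zero on
 * pre-5705 chips), rejects settings that would disable rx or tx interrupts
 * entirely, and applies the relevant fields with __tg3_set_coalesce() if
 * the device is running.
 */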
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
  8447. static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
  8448. {
  8449. u32 cursize, val, magic;
  8450. tp->nvram_size = EEPROM_CHIP_SIZE;
  8451. if (tg3_nvram_read(tp, 0, &magic) != 0)
  8452. return;
  8453. if ((magic != TG3_EEPROM_MAGIC) &&
  8454. ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
  8455. ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
  8456. return;
  8457. /*
  8458. * Size the chip by reading offsets at increasing powers of two.
  8459. * When we encounter our validation signature, we know the addressing
  8460. * has wrapped around, and thus have our chip size.
  8461. */
  8462. cursize = 0x10;
  8463. while (cursize < tp->nvram_size) {
  8464. if (tg3_nvram_read(tp, cursize, &val) != 0)
  8465. return;
  8466. if (val == magic)
  8467. break;
  8468. cursize <<= 1;
  8469. }
  8470. tp->nvram_size = cursize;
  8471. }
  8472. static void __devinit tg3_get_nvram_size(struct tg3 *tp)
  8473. {
  8474. u32 val;
  8475. if (tg3_nvram_read(tp, 0, &val) != 0)
  8476. return;
  8477. /* Selfboot format */
  8478. if (val != TG3_EEPROM_MAGIC) {
  8479. tg3_get_eeprom_size(tp);
  8480. return;
  8481. }
  8482. if (tg3_nvram_read_swab(tp, 0xf0, &val) == 0) {
  8483. if (val != 0) {
  8484. tp->nvram_size = (val >> 16) * 1024;
  8485. return;
  8486. }
  8487. }
  8488. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  8489. }
  8490. static void __devinit tg3_get_nvram_info(struct tg3 *tp)
  8491. {
  8492. u32 nvcfg1;
  8493. nvcfg1 = tr32(NVRAM_CFG1);
  8494. if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
  8495. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8496. }
  8497. else {
  8498. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  8499. tw32(NVRAM_CFG1, nvcfg1);
  8500. }
  8501. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
  8502. (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
  8503. switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
  8504. case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
  8505. tp->nvram_jedecnum = JEDEC_ATMEL;
  8506. tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
  8507. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8508. break;
  8509. case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
  8510. tp->nvram_jedecnum = JEDEC_ATMEL;
  8511. tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
  8512. break;
  8513. case FLASH_VENDOR_ATMEL_EEPROM:
  8514. tp->nvram_jedecnum = JEDEC_ATMEL;
  8515. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  8516. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8517. break;
  8518. case FLASH_VENDOR_ST:
  8519. tp->nvram_jedecnum = JEDEC_ST;
  8520. tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
  8521. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8522. break;
  8523. case FLASH_VENDOR_SAIFUN:
  8524. tp->nvram_jedecnum = JEDEC_SAIFUN;
  8525. tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
  8526. break;
  8527. case FLASH_VENDOR_SST_SMALL:
  8528. case FLASH_VENDOR_SST_LARGE:
  8529. tp->nvram_jedecnum = JEDEC_SST;
  8530. tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
  8531. break;
  8532. }
  8533. }
  8534. else {
  8535. tp->nvram_jedecnum = JEDEC_ATMEL;
  8536. tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
  8537. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8538. }
  8539. }
  8540. static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
  8541. {
  8542. u32 nvcfg1;
  8543. nvcfg1 = tr32(NVRAM_CFG1);
  8544. /* NVRAM protection for TPM */
  8545. if (nvcfg1 & (1 << 27))
  8546. tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
  8547. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  8548. case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
  8549. case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
  8550. tp->nvram_jedecnum = JEDEC_ATMEL;
  8551. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8552. break;
  8553. case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
  8554. tp->nvram_jedecnum = JEDEC_ATMEL;
  8555. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8556. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8557. break;
  8558. case FLASH_5752VENDOR_ST_M45PE10:
  8559. case FLASH_5752VENDOR_ST_M45PE20:
  8560. case FLASH_5752VENDOR_ST_M45PE40:
  8561. tp->nvram_jedecnum = JEDEC_ST;
  8562. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8563. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8564. break;
  8565. }
  8566. if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
  8567. switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
  8568. case FLASH_5752PAGE_SIZE_256:
  8569. tp->nvram_pagesize = 256;
  8570. break;
  8571. case FLASH_5752PAGE_SIZE_512:
  8572. tp->nvram_pagesize = 512;
  8573. break;
  8574. case FLASH_5752PAGE_SIZE_1K:
  8575. tp->nvram_pagesize = 1024;
  8576. break;
  8577. case FLASH_5752PAGE_SIZE_2K:
  8578. tp->nvram_pagesize = 2048;
  8579. break;
  8580. case FLASH_5752PAGE_SIZE_4K:
  8581. tp->nvram_pagesize = 4096;
  8582. break;
  8583. case FLASH_5752PAGE_SIZE_264:
  8584. tp->nvram_pagesize = 264;
  8585. break;
  8586. }
  8587. }
  8588. else {
  8589. /* For eeprom, set pagesize to maximum eeprom size */
  8590. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  8591. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  8592. tw32(NVRAM_CFG1, nvcfg1);
  8593. }
  8594. }
  8595. static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
  8596. {
  8597. u32 nvcfg1, protect = 0;
  8598. nvcfg1 = tr32(NVRAM_CFG1);
  8599. /* NVRAM protection for TPM */
  8600. if (nvcfg1 & (1 << 27)) {
  8601. tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
  8602. protect = 1;
  8603. }
  8604. nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
  8605. switch (nvcfg1) {
  8606. case FLASH_5755VENDOR_ATMEL_FLASH_1:
  8607. case FLASH_5755VENDOR_ATMEL_FLASH_2:
  8608. case FLASH_5755VENDOR_ATMEL_FLASH_3:
  8609. case FLASH_5755VENDOR_ATMEL_FLASH_5:
  8610. tp->nvram_jedecnum = JEDEC_ATMEL;
  8611. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8612. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8613. tp->nvram_pagesize = 264;
  8614. if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
  8615. nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
  8616. tp->nvram_size = (protect ? 0x3e200 :
  8617. TG3_NVRAM_SIZE_512KB);
  8618. else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
  8619. tp->nvram_size = (protect ? 0x1f200 :
  8620. TG3_NVRAM_SIZE_256KB);
  8621. else
  8622. tp->nvram_size = (protect ? 0x1f200 :
  8623. TG3_NVRAM_SIZE_128KB);
  8624. break;
  8625. case FLASH_5752VENDOR_ST_M45PE10:
  8626. case FLASH_5752VENDOR_ST_M45PE20:
  8627. case FLASH_5752VENDOR_ST_M45PE40:
  8628. tp->nvram_jedecnum = JEDEC_ST;
  8629. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8630. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8631. tp->nvram_pagesize = 256;
  8632. if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
  8633. tp->nvram_size = (protect ?
  8634. TG3_NVRAM_SIZE_64KB :
  8635. TG3_NVRAM_SIZE_128KB);
  8636. else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
  8637. tp->nvram_size = (protect ?
  8638. TG3_NVRAM_SIZE_64KB :
  8639. TG3_NVRAM_SIZE_256KB);
  8640. else
  8641. tp->nvram_size = (protect ?
  8642. TG3_NVRAM_SIZE_128KB :
  8643. TG3_NVRAM_SIZE_512KB);
  8644. break;
  8645. }
  8646. }
  8647. static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
  8648. {
  8649. u32 nvcfg1;
  8650. nvcfg1 = tr32(NVRAM_CFG1);
  8651. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  8652. case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
  8653. case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
  8654. case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
  8655. case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
  8656. tp->nvram_jedecnum = JEDEC_ATMEL;
  8657. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8658. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  8659. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  8660. tw32(NVRAM_CFG1, nvcfg1);
  8661. break;
  8662. case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
  8663. case FLASH_5755VENDOR_ATMEL_FLASH_1:
  8664. case FLASH_5755VENDOR_ATMEL_FLASH_2:
  8665. case FLASH_5755VENDOR_ATMEL_FLASH_3:
  8666. tp->nvram_jedecnum = JEDEC_ATMEL;
  8667. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8668. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8669. tp->nvram_pagesize = 264;
  8670. break;
  8671. case FLASH_5752VENDOR_ST_M45PE10:
  8672. case FLASH_5752VENDOR_ST_M45PE20:
  8673. case FLASH_5752VENDOR_ST_M45PE40:
  8674. tp->nvram_jedecnum = JEDEC_ST;
  8675. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8676. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8677. tp->nvram_pagesize = 256;
  8678. break;
  8679. }
  8680. }
  8681. static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
  8682. {
  8683. u32 nvcfg1, protect = 0;
  8684. nvcfg1 = tr32(NVRAM_CFG1);
  8685. /* NVRAM protection for TPM */
  8686. if (nvcfg1 & (1 << 27)) {
  8687. tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
  8688. protect = 1;
  8689. }
  8690. nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
  8691. switch (nvcfg1) {
  8692. case FLASH_5761VENDOR_ATMEL_ADB021D:
  8693. case FLASH_5761VENDOR_ATMEL_ADB041D:
  8694. case FLASH_5761VENDOR_ATMEL_ADB081D:
  8695. case FLASH_5761VENDOR_ATMEL_ADB161D:
  8696. case FLASH_5761VENDOR_ATMEL_MDB021D:
  8697. case FLASH_5761VENDOR_ATMEL_MDB041D:
  8698. case FLASH_5761VENDOR_ATMEL_MDB081D:
  8699. case FLASH_5761VENDOR_ATMEL_MDB161D:
  8700. tp->nvram_jedecnum = JEDEC_ATMEL;
  8701. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8702. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8703. tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
  8704. tp->nvram_pagesize = 256;
  8705. break;
  8706. case FLASH_5761VENDOR_ST_A_M45PE20:
  8707. case FLASH_5761VENDOR_ST_A_M45PE40:
  8708. case FLASH_5761VENDOR_ST_A_M45PE80:
  8709. case FLASH_5761VENDOR_ST_A_M45PE16:
  8710. case FLASH_5761VENDOR_ST_M_M45PE20:
  8711. case FLASH_5761VENDOR_ST_M_M45PE40:
  8712. case FLASH_5761VENDOR_ST_M_M45PE80:
  8713. case FLASH_5761VENDOR_ST_M_M45PE16:
  8714. tp->nvram_jedecnum = JEDEC_ST;
  8715. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8716. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8717. tp->nvram_pagesize = 256;
  8718. break;
  8719. }
  8720. if (protect) {
  8721. tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
  8722. } else {
  8723. switch (nvcfg1) {
  8724. case FLASH_5761VENDOR_ATMEL_ADB161D:
  8725. case FLASH_5761VENDOR_ATMEL_MDB161D:
  8726. case FLASH_5761VENDOR_ST_A_M45PE16:
  8727. case FLASH_5761VENDOR_ST_M_M45PE16:
  8728. tp->nvram_size = TG3_NVRAM_SIZE_2MB;
  8729. break;
  8730. case FLASH_5761VENDOR_ATMEL_ADB081D:
  8731. case FLASH_5761VENDOR_ATMEL_MDB081D:
  8732. case FLASH_5761VENDOR_ST_A_M45PE80:
  8733. case FLASH_5761VENDOR_ST_M_M45PE80:
  8734. tp->nvram_size = TG3_NVRAM_SIZE_1MB;
  8735. break;
  8736. case FLASH_5761VENDOR_ATMEL_ADB041D:
  8737. case FLASH_5761VENDOR_ATMEL_MDB041D:
  8738. case FLASH_5761VENDOR_ST_A_M45PE40:
  8739. case FLASH_5761VENDOR_ST_M_M45PE40:
  8740. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  8741. break;
  8742. case FLASH_5761VENDOR_ATMEL_ADB021D:
  8743. case FLASH_5761VENDOR_ATMEL_MDB021D:
  8744. case FLASH_5761VENDOR_ST_A_M45PE20:
  8745. case FLASH_5761VENDOR_ST_M_M45PE20:
  8746. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  8747. break;
  8748. }
  8749. }
  8750. }
  8751. static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
  8752. {
  8753. tp->nvram_jedecnum = JEDEC_ATMEL;
  8754. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8755. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  8756. }
  8757. static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
  8758. {
  8759. u32 nvcfg1;
  8760. nvcfg1 = tr32(NVRAM_CFG1);
  8761. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  8762. case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
  8763. case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
  8764. tp->nvram_jedecnum = JEDEC_ATMEL;
  8765. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8766. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  8767. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  8768. tw32(NVRAM_CFG1, nvcfg1);
  8769. return;
  8770. case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
  8771. case FLASH_57780VENDOR_ATMEL_AT45DB011D:
  8772. case FLASH_57780VENDOR_ATMEL_AT45DB011B:
  8773. case FLASH_57780VENDOR_ATMEL_AT45DB021D:
  8774. case FLASH_57780VENDOR_ATMEL_AT45DB021B:
  8775. case FLASH_57780VENDOR_ATMEL_AT45DB041D:
  8776. case FLASH_57780VENDOR_ATMEL_AT45DB041B:
  8777. tp->nvram_jedecnum = JEDEC_ATMEL;
  8778. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8779. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8780. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  8781. case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
  8782. case FLASH_57780VENDOR_ATMEL_AT45DB011D:
  8783. case FLASH_57780VENDOR_ATMEL_AT45DB011B:
  8784. tp->nvram_size = TG3_NVRAM_SIZE_128KB;
  8785. break;
  8786. case FLASH_57780VENDOR_ATMEL_AT45DB021D:
  8787. case FLASH_57780VENDOR_ATMEL_AT45DB021B:
  8788. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  8789. break;
  8790. case FLASH_57780VENDOR_ATMEL_AT45DB041D:
  8791. case FLASH_57780VENDOR_ATMEL_AT45DB041B:
  8792. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  8793. break;
  8794. }
  8795. break;
  8796. case FLASH_5752VENDOR_ST_M45PE10:
  8797. case FLASH_5752VENDOR_ST_M45PE20:
  8798. case FLASH_5752VENDOR_ST_M45PE40:
  8799. tp->nvram_jedecnum = JEDEC_ST;
  8800. tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
  8801. tp->tg3_flags2 |= TG3_FLG2_FLASH;
  8802. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  8803. case FLASH_5752VENDOR_ST_M45PE10:
  8804. tp->nvram_size = TG3_NVRAM_SIZE_128KB;
  8805. break;
  8806. case FLASH_5752VENDOR_ST_M45PE20:
  8807. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  8808. break;
  8809. case FLASH_5752VENDOR_ST_M45PE40:
  8810. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  8811. break;
  8812. }
  8813. break;
  8814. default:
  8815. return;
  8816. }
  8817. switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
  8818. case FLASH_5752PAGE_SIZE_256:
  8819. tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
  8820. tp->nvram_pagesize = 256;
  8821. break;
  8822. case FLASH_5752PAGE_SIZE_512:
  8823. tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
  8824. tp->nvram_pagesize = 512;
  8825. break;
  8826. case FLASH_5752PAGE_SIZE_1K:
  8827. tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
  8828. tp->nvram_pagesize = 1024;
  8829. break;
  8830. case FLASH_5752PAGE_SIZE_2K:
  8831. tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
  8832. tp->nvram_pagesize = 2048;
  8833. break;
  8834. case FLASH_5752PAGE_SIZE_4K:
  8835. tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
  8836. tp->nvram_pagesize = 4096;
  8837. break;
  8838. case FLASH_5752PAGE_SIZE_264:
  8839. tp->nvram_pagesize = 264;
  8840. break;
  8841. case FLASH_5752PAGE_SIZE_528:
  8842. tp->nvram_pagesize = 528;
  8843. break;
  8844. }
  8845. }
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

		if (tg3_nvram_lock(tp)) {
			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
			       "tg3_nvram_init failed.\n", tp->dev->name);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			tg3_get_57780_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
  8893. static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
  8894. u32 offset, u32 len, u8 *buf)
  8895. {
  8896. int i, j, rc = 0;
  8897. u32 val;
  8898. for (i = 0; i < len; i += 4) {
  8899. u32 addr;
  8900. __le32 data;
  8901. addr = offset + i;
  8902. memcpy(&data, buf + i, 4);
  8903. tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
  8904. val = tr32(GRC_EEPROM_ADDR);
  8905. tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
  8906. val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
  8907. EEPROM_ADDR_READ);
  8908. tw32(GRC_EEPROM_ADDR, val |
  8909. (0 << EEPROM_ADDR_DEVID_SHIFT) |
  8910. (addr & EEPROM_ADDR_ADDR_MASK) |
  8911. EEPROM_ADDR_START |
  8912. EEPROM_ADDR_WRITE);
  8913. for (j = 0; j < 1000; j++) {
  8914. val = tr32(GRC_EEPROM_ADDR);
  8915. if (val & EEPROM_ADDR_COMPLETE)
  8916. break;
  8917. msleep(1);
  8918. }
  8919. if (!(val & EEPROM_ADDR_COMPLETE)) {
  8920. rc = -EBUSY;
  8921. break;
  8922. }
  8923. }
  8924. return rc;
  8925. }
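
/* Unbuffered flash writes below are done page by page: the affected page is
 * first read into a temporary buffer, the new data is merged in, a write
 * enable and page erase are issued, and then the whole page is written back
 * word by word with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing.
 */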
  8926. /* offset and length are dword aligned */
  8927. static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
  8928. u8 *buf)
  8929. {
  8930. int ret = 0;
  8931. u32 pagesize = tp->nvram_pagesize;
  8932. u32 pagemask = pagesize - 1;
  8933. u32 nvram_cmd;
  8934. u8 *tmp;
  8935. tmp = kmalloc(pagesize, GFP_KERNEL);
  8936. if (tmp == NULL)
  8937. return -ENOMEM;
  8938. while (len) {
  8939. int j;
  8940. u32 phy_addr, page_off, size;
  8941. phy_addr = offset & ~pagemask;
  8942. for (j = 0; j < pagesize; j += 4) {
  8943. if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
  8944. (__le32 *) (tmp + j))))
  8945. break;
  8946. }
  8947. if (ret)
  8948. break;
  8949. page_off = offset & pagemask;
  8950. size = pagesize;
  8951. if (len < size)
  8952. size = len;
  8953. len -= size;
  8954. memcpy(tmp + page_off, buf, size);
  8955. offset = offset + (pagesize - page_off);
  8956. tg3_enable_nvram_access(tp);
  8957. /*
  8958. * Before we can erase the flash page, we need
  8959. * to issue a special "write enable" command.
  8960. */
  8961. nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
  8962. if (tg3_nvram_exec_cmd(tp, nvram_cmd))
  8963. break;
  8964. /* Erase the target page */
  8965. tw32(NVRAM_ADDR, phy_addr);
  8966. nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
  8967. NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
  8968. if (tg3_nvram_exec_cmd(tp, nvram_cmd))
  8969. break;
  8970. /* Issue another write enable to start the write. */
  8971. nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
  8972. if (tg3_nvram_exec_cmd(tp, nvram_cmd))
  8973. break;
  8974. for (j = 0; j < pagesize; j += 4) {
  8975. __be32 data;
  8976. data = *((__be32 *) (tmp + j));
  8977. /* swab32(le32_to_cpu(data)), actually */
  8978. tw32(NVRAM_WRDATA, be32_to_cpu(data));
  8979. tw32(NVRAM_ADDR, phy_addr + j);
  8980. nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
  8981. NVRAM_CMD_WR;
  8982. if (j == 0)
  8983. nvram_cmd |= NVRAM_CMD_FIRST;
  8984. else if (j == (pagesize - 4))
  8985. nvram_cmd |= NVRAM_CMD_LAST;
  8986. if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
  8987. break;
  8988. }
  8989. if (ret)
  8990. break;
  8991. }
  8992. nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
  8993. tg3_nvram_exec_cmd(tp, nvram_cmd);
  8994. kfree(tmp);
  8995. return ret;
  8996. }
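
/* Buffered (and EEPROM-style) writes do not need a page erase: each 32-bit
 * word is written directly, with NVRAM_CMD_FIRST at the start of a page or
 * transfer and NVRAM_CMD_LAST at the end of a page or at the final word.
 * On the chips that need it, ST parts additionally get a write-enable
 * command at the start of each page.
 */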
  8997. /* offset and length are dword aligned */
  8998. static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
  8999. u8 *buf)
  9000. {
  9001. int i, ret = 0;
  9002. for (i = 0; i < len; i += 4, offset += 4) {
  9003. u32 page_off, phy_addr, nvram_cmd;
  9004. __be32 data;
  9005. memcpy(&data, buf + i, 4);
  9006. tw32(NVRAM_WRDATA, be32_to_cpu(data));
  9007. page_off = offset % tp->nvram_pagesize;
  9008. phy_addr = tg3_nvram_phys_addr(tp, offset);
  9009. tw32(NVRAM_ADDR, phy_addr);
  9010. nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
  9011. if ((page_off == 0) || (i == 0))
  9012. nvram_cmd |= NVRAM_CMD_FIRST;
  9013. if (page_off == (tp->nvram_pagesize - 4))
  9014. nvram_cmd |= NVRAM_CMD_LAST;
  9015. if (i == (len - 4))
  9016. nvram_cmd |= NVRAM_CMD_LAST;
  9017. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
  9018. !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
  9019. (tp->nvram_jedecnum == JEDEC_ST) &&
  9020. (nvram_cmd & NVRAM_CMD_FIRST)) {
  9021. if ((ret = tg3_nvram_exec_cmd(tp,
  9022. NVRAM_CMD_WREN | NVRAM_CMD_GO |
  9023. NVRAM_CMD_DONE)))
  9024. break;
  9025. }
  9026. if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
  9027. /* We always do complete word writes to eeprom. */
  9028. nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
  9029. }
  9030. if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
  9031. break;
  9032. }
  9033. return ret;
  9034. }
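
/* tg3_nvram_write_block() is the common entry point: it temporarily drops
 * the EEPROM write-protect GPIO if TG3_FLAG_EEPROM_WRITE_PROT is set,
 * enables NVRAM write access, and dispatches to the legacy EEPROM helper or
 * to the buffered/unbuffered flash helpers depending on the part type,
 * restoring protection afterwards.
 */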
  9035. /* offset and length are dword aligned */
  9036. static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
  9037. {
  9038. int ret;
  9039. if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
  9040. tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
  9041. ~GRC_LCLCTRL_GPIO_OUTPUT1);
  9042. udelay(40);
  9043. }
  9044. if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
  9045. ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
  9046. }
  9047. else {
  9048. u32 grc_mode;
  9049. ret = tg3_nvram_lock(tp);
  9050. if (ret)
  9051. return ret;
  9052. tg3_enable_nvram_access(tp);
  9053. if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
  9054. !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
  9055. tw32(NVRAM_WRITE1, 0x406);
  9056. grc_mode = tr32(GRC_MODE);
  9057. tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
  9058. if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
  9059. !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
  9060. ret = tg3_nvram_write_block_buffered(tp, offset, len,
  9061. buf);
  9062. }
  9063. else {
  9064. ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
  9065. buf);
  9066. }
  9067. grc_mode = tr32(GRC_MODE);
  9068. tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
  9069. tg3_disable_nvram_access(tp);
  9070. tg3_nvram_unlock(tp);
  9071. }
  9072. if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
  9073. tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
  9074. udelay(40);
  9075. }
  9076. return ret;
  9077. }
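
/* Some boards do not report a usable PHY ID directly; the table below maps
 * PCI subsystem vendor/device IDs of known Broadcom, 3Com, Dell, Compaq and
 * IBM boards to the PHY actually fitted (an ID of 0 is used for boards
 * without a copper PHY, e.g. fiber/SerDes variants), and lookup_by_subsys()
 * searches it for the current device.
 */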
  9078. struct subsys_tbl_ent {
  9079. u16 subsys_vendor, subsys_devid;
  9080. u32 phy_id;
  9081. };
  9082. static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
  9083. /* Broadcom boards. */
  9084. { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
  9085. { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
  9086. { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
  9087. { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
  9088. { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
  9089. { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
  9090. { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
  9091. { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
  9092. { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
  9093. { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
  9094. { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
  9095. /* 3com boards. */
  9096. { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
  9097. { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
  9098. { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
  9099. { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
  9100. { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
  9101. /* DELL boards. */
  9102. { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
  9103. { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
  9104. { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
  9105. { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
  9106. /* Compaq boards. */
  9107. { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
  9108. { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
  9109. { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
  9110. { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
  9111. { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
  9112. /* IBM boards. */
  9113. { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
  9114. };
  9115. static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
  9116. {
  9117. int i;
  9118. for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
  9119. if ((subsys_id_to_phy_id[i].subsys_vendor ==
  9120. tp->pdev->subsystem_vendor) &&
  9121. (subsys_id_to_phy_id[i].subsys_devid ==
  9122. tp->pdev->subsystem_device))
  9123. return &subsys_id_to_phy_id[i];
  9124. }
  9125. return NULL;
  9126. }
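/* Read the configuration that the bootcode left in NIC SRAM (PHY id,
 * LED mode, WOL/ASF/APE enables) and fold it into tp->tg3_flags*.
 * 5906 parts are handled via the VCPU_CFGSHDW shadow register instead.
 */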
  9127. static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
  9128. {
  9129. u32 val;
  9130. u16 pmcsr;
  9131. /* On some early chips the SRAM cannot be accessed in D3hot state,
9132. * so we need to make sure we're in D0.
  9133. */
  9134. pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
  9135. pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
  9136. pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
  9137. msleep(1);
  9138. /* Make sure register accesses (indirect or otherwise)
  9139. * will function correctly.
  9140. */
  9141. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  9142. tp->misc_host_ctrl);
  9143. /* The memory arbiter has to be enabled in order for SRAM accesses
  9144. * to succeed. Normally on powerup the tg3 chip firmware will make
  9145. * sure it is enabled, but other entities such as system netboot
  9146. * code might disable it.
  9147. */
  9148. val = tr32(MEMARB_MODE);
  9149. tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
  9150. tp->phy_id = PHY_ID_INVALID;
  9151. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  9152. /* Assume an onboard device and WOL capable by default. */
  9153. tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
  9154. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  9155. if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
  9156. tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
  9157. tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
  9158. }
  9159. val = tr32(VCPU_CFGSHDW);
  9160. if (val & VCPU_CFGSHDW_ASPM_DBNC)
  9161. tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
  9162. if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
  9163. (val & VCPU_CFGSHDW_WOL_MAGPKT))
  9164. tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
  9165. goto done;
  9166. }
  9167. tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
  9168. if (val == NIC_SRAM_DATA_SIG_MAGIC) {
  9169. u32 nic_cfg, led_cfg;
  9170. u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
  9171. int eeprom_phy_serdes = 0;
  9172. tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
  9173. tp->nic_sram_data_cfg = nic_cfg;
  9174. tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
  9175. ver >>= NIC_SRAM_DATA_VER_SHIFT;
  9176. if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
  9177. (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
  9178. (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
  9179. (ver > 0) && (ver < 0x100))
  9180. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
  9181. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
  9182. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
  9183. if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
  9184. NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
  9185. eeprom_phy_serdes = 1;
  9186. tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
  9187. if (nic_phy_id != 0) {
  9188. u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
  9189. u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
  9190. eeprom_phy_id = (id1 >> 16) << 10;
  9191. eeprom_phy_id |= (id2 & 0xfc00) << 16;
  9192. eeprom_phy_id |= (id2 & 0x03ff) << 0;
  9193. } else
  9194. eeprom_phy_id = 0;
  9195. tp->phy_id = eeprom_phy_id;
  9196. if (eeprom_phy_serdes) {
  9197. if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
  9198. tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
  9199. else
  9200. tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
  9201. }
  9202. if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
  9203. led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
  9204. SHASTA_EXT_LED_MODE_MASK);
  9205. else
  9206. led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
  9207. switch (led_cfg) {
  9208. default:
  9209. case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
  9210. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  9211. break;
  9212. case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
  9213. tp->led_ctrl = LED_CTRL_MODE_PHY_2;
  9214. break;
  9215. case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
  9216. tp->led_ctrl = LED_CTRL_MODE_MAC;
  9217. /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9218. * read from some older 5700/5701 bootcode.
  9219. */
  9220. if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
  9221. ASIC_REV_5700 ||
  9222. GET_ASIC_REV(tp->pci_chip_rev_id) ==
  9223. ASIC_REV_5701)
  9224. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  9225. break;
  9226. case SHASTA_EXT_LED_SHARED:
  9227. tp->led_ctrl = LED_CTRL_MODE_SHARED;
  9228. if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
  9229. tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
  9230. tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
  9231. LED_CTRL_MODE_PHY_2);
  9232. break;
  9233. case SHASTA_EXT_LED_MAC:
  9234. tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
  9235. break;
  9236. case SHASTA_EXT_LED_COMBO:
  9237. tp->led_ctrl = LED_CTRL_MODE_COMBO;
  9238. if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
  9239. tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
  9240. LED_CTRL_MODE_PHY_2);
  9241. break;
  9242. }
  9243. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  9244. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
  9245. tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
  9246. tp->led_ctrl = LED_CTRL_MODE_PHY_2;
  9247. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
  9248. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  9249. if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
  9250. tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
  9251. if ((tp->pdev->subsystem_vendor ==
  9252. PCI_VENDOR_ID_ARIMA) &&
  9253. (tp->pdev->subsystem_device == 0x205a ||
  9254. tp->pdev->subsystem_device == 0x2063))
  9255. tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
  9256. } else {
  9257. tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
  9258. tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
  9259. }
  9260. if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
  9261. tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
  9262. if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
  9263. tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
  9264. }
  9265. if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
  9266. (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
  9267. tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
  9268. if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
  9269. !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
  9270. tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
  9271. if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
  9272. (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
  9273. tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
  9274. if (cfg2 & (1 << 17))
  9275. tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9276. /* SerDes signal pre-emphasis in register 0x590 is set by the
9277. * bootcode if bit 18 is set. */
  9278. if (cfg2 & (1 << 18))
  9279. tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
  9280. if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
  9281. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
  9282. (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
  9283. tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
  9284. if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
  9285. u32 cfg3;
  9286. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
  9287. if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
  9288. tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
  9289. }
  9290. if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
  9291. tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
  9292. if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
  9293. tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
  9294. if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
  9295. tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
  9296. }
  9297. done:
  9298. device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
  9299. device_set_wakeup_enable(&tp->pdev->dev,
  9300. tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
  9301. }
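/* Issue a single OTP controller command and poll OTP_STATUS for
 * completion, giving up after roughly 1 ms (100 polls of 10 usec).
 */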
  9302. static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
  9303. {
  9304. int i;
  9305. u32 val;
  9306. tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
  9307. tw32(OTP_CTRL, cmd);
  9308. /* Wait for up to 1 ms for command to execute. */
  9309. for (i = 0; i < 100; i++) {
  9310. val = tr32(OTP_STATUS);
  9311. if (val & OTP_STATUS_CMD_DONE)
  9312. break;
  9313. udelay(10);
  9314. }
  9315. return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
  9316. }
  9317. /* Read the gphy configuration from the OTP region of the chip. The gphy
  9318. * configuration is a 32-bit value that straddles the alignment boundary.
  9319. * We do two 32-bit reads and then shift and merge the results.
  9320. */
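/* For example (illustrative values only): if the read at OTP_ADDRESS_MAGIC1
 * returns 0x1111abcd and the read at OTP_ADDRESS_MAGIC2 returns 0x2345eeee,
 * the merged value is ((0xabcd << 16) | 0x2345) == 0xabcd2345.
 */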
  9321. static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
  9322. {
  9323. u32 bhalf_otp, thalf_otp;
  9324. tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
  9325. if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
  9326. return 0;
  9327. tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
  9328. if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
  9329. return 0;
  9330. thalf_otp = tr32(OTP_READ_DATA);
  9331. tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
  9332. if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
  9333. return 0;
  9334. bhalf_otp = tr32(OTP_READ_DATA);
  9335. return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
  9336. }
  9337. static int __devinit tg3_phy_probe(struct tg3 *tp)
  9338. {
  9339. u32 hw_phy_id_1, hw_phy_id_2;
  9340. u32 hw_phy_id, hw_phy_id_masked;
  9341. int err;
  9342. if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
  9343. return tg3_phy_init(tp);
  9344. /* Reading the PHY ID register can conflict with ASF
9345. * firmware access to the PHY hardware.
  9346. */
  9347. err = 0;
  9348. if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
  9349. (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
  9350. hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
  9351. } else {
  9352. /* Now read the physical PHY_ID from the chip and verify
  9353. * that it is sane. If it doesn't look good, we fall back
9354. * to either the hard-coded, table-based PHY_ID or, failing
9355. * that, the value found in the eeprom area.
  9356. */
  9357. err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
  9358. err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
  9359. hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
  9360. hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
  9361. hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
  9362. hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
  9363. }
  9364. if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
  9365. tp->phy_id = hw_phy_id;
  9366. if (hw_phy_id_masked == PHY_ID_BCM8002)
  9367. tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
  9368. else
  9369. tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
  9370. } else {
  9371. if (tp->phy_id != PHY_ID_INVALID) {
  9372. /* Do nothing, phy ID already set up in
  9373. * tg3_get_eeprom_hw_cfg().
  9374. */
  9375. } else {
  9376. struct subsys_tbl_ent *p;
  9377. /* No eeprom signature? Try the hardcoded
  9378. * subsys device table.
  9379. */
  9380. p = lookup_by_subsys(tp);
  9381. if (!p)
  9382. return -ENODEV;
  9383. tp->phy_id = p->phy_id;
  9384. if (!tp->phy_id ||
  9385. tp->phy_id == PHY_ID_BCM8002)
  9386. tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
  9387. }
  9388. }
  9389. if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
  9390. !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
  9391. !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
  9392. u32 bmsr, adv_reg, tg3_ctrl, mask;
  9393. tg3_readphy(tp, MII_BMSR, &bmsr);
  9394. if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
  9395. (bmsr & BMSR_LSTATUS))
  9396. goto skip_phy_reset;
  9397. err = tg3_phy_reset(tp);
  9398. if (err)
  9399. return err;
  9400. adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
  9401. ADVERTISE_100HALF | ADVERTISE_100FULL |
  9402. ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
  9403. tg3_ctrl = 0;
  9404. if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
  9405. tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
  9406. MII_TG3_CTRL_ADV_1000_FULL);
  9407. if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
  9408. tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
  9409. tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
  9410. MII_TG3_CTRL_ENABLE_AS_MASTER);
  9411. }
  9412. mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
  9413. ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
  9414. ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
  9415. if (!tg3_copper_is_advertising_all(tp, mask)) {
  9416. tg3_writephy(tp, MII_ADVERTISE, adv_reg);
  9417. if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
  9418. tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
  9419. tg3_writephy(tp, MII_BMCR,
  9420. BMCR_ANENABLE | BMCR_ANRESTART);
  9421. }
  9422. tg3_phy_set_wirespeed(tp);
  9423. tg3_writephy(tp, MII_ADVERTISE, adv_reg);
  9424. if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
  9425. tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
  9426. }
  9427. skip_phy_reset:
  9428. if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
  9429. err = tg3_init_5401phy_dsp(tp);
  9430. if (err)
  9431. return err;
  9432. }
  9433. if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
  9434. err = tg3_init_5401phy_dsp(tp);
  9435. }
  9436. if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
  9437. tp->link_config.advertising =
  9438. (ADVERTISED_1000baseT_Half |
  9439. ADVERTISED_1000baseT_Full |
  9440. ADVERTISED_Autoneg |
  9441. ADVERTISED_FIBRE);
  9442. if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
  9443. tp->link_config.advertising &=
  9444. ~(ADVERTISED_1000baseT_Half |
  9445. ADVERTISED_1000baseT_Full);
  9446. return err;
  9447. }
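/* Look up the board part number in the VPD area: 256 bytes of VPD are
 * read either from NVRAM (at offset 0x100) or through the PCI VPD
 * capability, and the read-only resource is then scanned for the "PN"
 * keyword.
 */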
  9448. static void __devinit tg3_read_partno(struct tg3 *tp)
  9449. {
  9450. unsigned char vpd_data[256];
  9451. unsigned int i;
  9452. u32 magic;
  9453. if (tg3_nvram_read(tp, 0x0, &magic))
  9454. goto out_not_found;
  9455. if (magic == TG3_EEPROM_MAGIC) {
  9456. for (i = 0; i < 256; i += 4) {
  9457. u32 tmp;
  9458. if (tg3_nvram_read_swab(tp, 0x100 + i, &tmp))
  9459. goto out_not_found;
  9460. vpd_data[i + 0] = ((tmp >> 0) & 0xff);
  9461. vpd_data[i + 1] = ((tmp >> 8) & 0xff);
  9462. vpd_data[i + 2] = ((tmp >> 16) & 0xff);
  9463. vpd_data[i + 3] = ((tmp >> 24) & 0xff);
  9464. }
  9465. } else {
  9466. int vpd_cap;
  9467. vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
  9468. for (i = 0; i < 256; i += 4) {
  9469. u32 tmp, j = 0;
  9470. __le32 v;
  9471. u16 tmp16;
  9472. pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
  9473. i);
  9474. while (j++ < 100) {
  9475. pci_read_config_word(tp->pdev, vpd_cap +
  9476. PCI_VPD_ADDR, &tmp16);
  9477. if (tmp16 & 0x8000)
  9478. break;
  9479. msleep(1);
  9480. }
  9481. if (!(tmp16 & 0x8000))
  9482. goto out_not_found;
  9483. pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
  9484. &tmp);
  9485. v = cpu_to_le32(tmp);
  9486. memcpy(&vpd_data[i], &v, 4);
  9487. }
  9488. }
  9489. /* Now parse and find the part number. */
  9490. for (i = 0; i < 254; ) {
  9491. unsigned char val = vpd_data[i];
  9492. unsigned int block_end;
  9493. if (val == 0x82 || val == 0x91) {
  9494. i = (i + 3 +
  9495. (vpd_data[i + 1] +
  9496. (vpd_data[i + 2] << 8)));
  9497. continue;
  9498. }
  9499. if (val != 0x90)
  9500. goto out_not_found;
  9501. block_end = (i + 3 +
  9502. (vpd_data[i + 1] +
  9503. (vpd_data[i + 2] << 8)));
  9504. i += 3;
  9505. if (block_end > 256)
  9506. goto out_not_found;
  9507. while (i < (block_end - 2)) {
  9508. if (vpd_data[i + 0] == 'P' &&
  9509. vpd_data[i + 1] == 'N') {
  9510. int partno_len = vpd_data[i + 2];
  9511. i += 3;
  9512. if (partno_len > 24 || (partno_len + i) > 256)
  9513. goto out_not_found;
  9514. memcpy(tp->board_part_number,
  9515. &vpd_data[i], partno_len);
  9516. /* Success. */
  9517. return;
  9518. }
  9519. i += 3 + vpd_data[i + 2];
  9520. }
  9521. /* Part number not found. */
  9522. goto out_not_found;
  9523. }
  9524. out_not_found:
  9525. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  9526. strcpy(tp->board_part_number, "BCM95906");
  9527. else
  9528. strcpy(tp->board_part_number, "none");
  9529. }
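/* A firmware image at 'offset' is considered valid when its first word,
 * masked with 0xfc000000, equals 0x0c000000 and its second word is zero.
 */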
  9530. static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
  9531. {
  9532. u32 val;
  9533. if (tg3_nvram_read(tp, offset, &val) ||
  9534. (val & 0xfc000000) != 0x0c000000 ||
  9535. tg3_nvram_read(tp, offset + 4, &val) ||
  9536. val != 0)
  9537. return 0;
  9538. return 1;
  9539. }
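/* Decode the self-boot ("sb") NVRAM version.  The major/minor/build
 * fields share one word whose offset depends on the format revision;
 * a non-zero build is reported as a trailing letter ('a' for build 1).
 */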
  9540. static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
  9541. {
  9542. u32 offset, major, minor, build;
  9543. tp->fw_ver[0] = 's';
  9544. tp->fw_ver[1] = 'b';
  9545. tp->fw_ver[2] = '\0';
  9546. if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
  9547. return;
  9548. switch (val & TG3_EEPROM_SB_REVISION_MASK) {
  9549. case TG3_EEPROM_SB_REVISION_0:
  9550. offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
  9551. break;
  9552. case TG3_EEPROM_SB_REVISION_2:
  9553. offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
  9554. break;
  9555. case TG3_EEPROM_SB_REVISION_3:
  9556. offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
  9557. break;
  9558. default:
  9559. return;
  9560. }
  9561. if (tg3_nvram_read(tp, offset, &val))
  9562. return;
  9563. build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
  9564. TG3_EEPROM_SB_EDH_BLD_SHFT;
  9565. major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
  9566. TG3_EEPROM_SB_EDH_MAJ_SHFT;
  9567. minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
  9568. if (minor > 99 || build > 26)
  9569. return;
  9570. snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
  9571. if (build > 0) {
  9572. tp->fw_ver[8] = 'a' + build - 1;
  9573. tp->fw_ver[9] = '\0';
  9574. }
  9575. }
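/* Build tp->fw_ver: the bootcode version string comes first and, when
 * ASF is enabled and the APE is not, the ASF firmware version is
 * appended after ", ".
 */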
  9576. static void __devinit tg3_read_fw_ver(struct tg3 *tp)
  9577. {
  9578. u32 val, offset, start;
  9579. u32 ver_offset;
  9580. int i, bcnt;
  9581. if (tg3_nvram_read(tp, 0, &val))
  9582. return;
  9583. if (val != TG3_EEPROM_MAGIC) {
  9584. if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
  9585. tg3_read_sb_ver(tp, val);
  9586. return;
  9587. }
  9588. if (tg3_nvram_read(tp, 0xc, &offset) ||
  9589. tg3_nvram_read(tp, 0x4, &start))
  9590. return;
  9591. offset = tg3_nvram_logical_addr(tp, offset);
  9592. if (!tg3_fw_img_is_valid(tp, offset) ||
  9593. tg3_nvram_read(tp, offset + 8, &ver_offset))
  9594. return;
  9595. offset = offset + ver_offset - start;
  9596. for (i = 0; i < 16; i += 4) {
  9597. __le32 v;
  9598. if (tg3_nvram_read_le(tp, offset + i, &v))
  9599. return;
  9600. memcpy(tp->fw_ver + i, &v, 4);
  9601. }
  9602. if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
  9603. (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
  9604. return;
  9605. for (offset = TG3_NVM_DIR_START;
  9606. offset < TG3_NVM_DIR_END;
  9607. offset += TG3_NVM_DIRENT_SIZE) {
  9608. if (tg3_nvram_read(tp, offset, &val))
  9609. return;
  9610. if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
  9611. break;
  9612. }
  9613. if (offset == TG3_NVM_DIR_END)
  9614. return;
  9615. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
  9616. start = 0x08000000;
  9617. else if (tg3_nvram_read(tp, offset - 4, &start))
  9618. return;
  9619. if (tg3_nvram_read(tp, offset + 4, &offset) ||
  9620. !tg3_fw_img_is_valid(tp, offset) ||
  9621. tg3_nvram_read(tp, offset + 8, &val))
  9622. return;
  9623. offset += val - start;
  9624. bcnt = strlen(tp->fw_ver);
  9625. tp->fw_ver[bcnt++] = ',';
  9626. tp->fw_ver[bcnt++] = ' ';
  9627. for (i = 0; i < 4; i++) {
  9628. __le32 v;
  9629. if (tg3_nvram_read_le(tp, offset, &v))
  9630. return;
  9631. offset += sizeof(v);
  9632. if (bcnt > TG3_VER_SIZE - sizeof(v)) {
  9633. memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
  9634. break;
  9635. }
  9636. memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
  9637. bcnt += sizeof(v);
  9638. }
  9639. tp->fw_ver[TG3_VER_SIZE - 1] = 0;
  9640. }
  9641. static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
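/* Probe the chip revision, bus type and register-access quirks, and set
 * up the per-revision workaround flags used throughout the rest of the
 * driver.
 */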
  9642. static int __devinit tg3_get_invariants(struct tg3 *tp)
  9643. {
  9644. static struct pci_device_id write_reorder_chipsets[] = {
  9645. { PCI_DEVICE(PCI_VENDOR_ID_AMD,
  9646. PCI_DEVICE_ID_AMD_FE_GATE_700C) },
  9647. { PCI_DEVICE(PCI_VENDOR_ID_AMD,
  9648. PCI_DEVICE_ID_AMD_8131_BRIDGE) },
  9649. { PCI_DEVICE(PCI_VENDOR_ID_VIA,
  9650. PCI_DEVICE_ID_VIA_8385_0) },
  9651. { },
  9652. };
  9653. u32 misc_ctrl_reg;
  9654. u32 pci_state_reg, grc_misc_cfg;
  9655. u32 val;
  9656. u16 pci_cmd;
  9657. int err;
  9658. /* Force memory write invalidate off. If we leave it on,
  9659. * then on 5700_BX chips we have to enable a workaround.
  9660. * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9661. * to match the cacheline size. The Broadcom driver has this
9662. * workaround but turns MWI off all the time, so it never uses
9663. * it. This seems to suggest that the workaround is insufficient.
  9664. */
  9665. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  9666. pci_cmd &= ~PCI_COMMAND_INVALIDATE;
  9667. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  9668. /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
  9669. * has the register indirect write enable bit set before
  9670. * we try to access any of the MMIO registers. It is also
  9671. * critical that the PCI-X hw workaround situation is decided
  9672. * before that as well.
  9673. */
  9674. pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  9675. &misc_ctrl_reg);
  9676. tp->pci_chip_rev_id = (misc_ctrl_reg >>
  9677. MISC_HOST_CTRL_CHIPREV_SHIFT);
  9678. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
  9679. u32 prod_id_asic_rev;
  9680. pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
  9681. &prod_id_asic_rev);
  9682. tp->pci_chip_rev_id = prod_id_asic_rev;
  9683. }
  9684. /* Wrong chip ID in 5752 A0. This code can be removed later
  9685. * as A0 is not in production.
  9686. */
  9687. if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
  9688. tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
  9689. /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
  9690. * we need to disable memory and use config. cycles
  9691. * only to access all registers. The 5702/03 chips
  9692. * can mistakenly decode the special cycles from the
  9693. * ICH chipsets as memory write cycles, causing corruption
  9694. * of register and memory space. Only certain ICH bridges
  9695. * will drive special cycles with non-zero data during the
  9696. * address phase which can fall within the 5703's address
  9697. * range. This is not an ICH bug as the PCI spec allows
  9698. * non-zero address during special cycles. However, only
  9699. * these ICH bridges are known to drive non-zero addresses
  9700. * during special cycles.
  9701. *
  9702. * Since special cycles do not cross PCI bridges, we only
  9703. * enable this workaround if the 5703 is on the secondary
  9704. * bus of these ICH bridges.
  9705. */
  9706. if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
  9707. (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
  9708. static struct tg3_dev_id {
  9709. u32 vendor;
  9710. u32 device;
  9711. u32 rev;
  9712. } ich_chipsets[] = {
  9713. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
  9714. PCI_ANY_ID },
  9715. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
  9716. PCI_ANY_ID },
  9717. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
  9718. 0xa },
  9719. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
  9720. PCI_ANY_ID },
  9721. { },
  9722. };
  9723. struct tg3_dev_id *pci_id = &ich_chipsets[0];
  9724. struct pci_dev *bridge = NULL;
  9725. while (pci_id->vendor != 0) {
  9726. bridge = pci_get_device(pci_id->vendor, pci_id->device,
  9727. bridge);
  9728. if (!bridge) {
  9729. pci_id++;
  9730. continue;
  9731. }
  9732. if (pci_id->rev != PCI_ANY_ID) {
  9733. if (bridge->revision > pci_id->rev)
  9734. continue;
  9735. }
  9736. if (bridge->subordinate &&
  9737. (bridge->subordinate->number ==
  9738. tp->pdev->bus->number)) {
  9739. tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
  9740. pci_dev_put(bridge);
  9741. break;
  9742. }
  9743. }
  9744. }
  9745. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
  9746. static struct tg3_dev_id {
  9747. u32 vendor;
  9748. u32 device;
  9749. } bridge_chipsets[] = {
  9750. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
  9751. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
  9752. { },
  9753. };
  9754. struct tg3_dev_id *pci_id = &bridge_chipsets[0];
  9755. struct pci_dev *bridge = NULL;
  9756. while (pci_id->vendor != 0) {
  9757. bridge = pci_get_device(pci_id->vendor,
  9758. pci_id->device,
  9759. bridge);
  9760. if (!bridge) {
  9761. pci_id++;
  9762. continue;
  9763. }
  9764. if (bridge->subordinate &&
  9765. (bridge->subordinate->number <=
  9766. tp->pdev->bus->number) &&
  9767. (bridge->subordinate->subordinate >=
  9768. tp->pdev->bus->number)) {
  9769. tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
  9770. pci_dev_put(bridge);
  9771. break;
  9772. }
  9773. }
  9774. }
  9775. /* The EPB bridge inside 5714, 5715, and 5780 cannot support
  9776. * DMA addresses > 40-bit. This bridge may have other additional
  9777. * 57xx devices behind it in some 4-port NIC designs for example.
  9778. * Any tg3 device found behind the bridge will also need the 40-bit
  9779. * DMA workaround.
  9780. */
  9781. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
  9782. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
  9783. tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
  9784. tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
  9785. tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
  9786. }
  9787. else {
  9788. struct pci_dev *bridge = NULL;
  9789. do {
  9790. bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
  9791. PCI_DEVICE_ID_SERVERWORKS_EPB,
  9792. bridge);
  9793. if (bridge && bridge->subordinate &&
  9794. (bridge->subordinate->number <=
  9795. tp->pdev->bus->number) &&
  9796. (bridge->subordinate->subordinate >=
  9797. tp->pdev->bus->number)) {
  9798. tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
  9799. pci_dev_put(bridge);
  9800. break;
  9801. }
  9802. } while (bridge);
  9803. }
  9804. /* Initialize misc host control in PCI block. */
  9805. tp->misc_host_ctrl |= (misc_ctrl_reg &
  9806. MISC_HOST_CTRL_CHIPREV);
  9807. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  9808. tp->misc_host_ctrl);
  9809. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
  9810. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
  9811. tp->pdev_peer = tg3_find_peer(tp);
  9812. /* Intentionally exclude ASIC_REV_5906 */
  9813. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  9814. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
  9815. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  9816. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  9817. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  9818. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  9819. tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
  9820. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
  9821. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  9822. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
  9823. (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
  9824. (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
  9825. tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
  9826. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
  9827. (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
  9828. tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
  9829. /* 5700 B0 chips do not support checksumming correctly due
  9830. * to hardware bugs.
  9831. */
  9832. if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
  9833. tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
  9834. else {
  9835. tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
  9836. tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
  9837. if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
  9838. tp->dev->features |= NETIF_F_IPV6_CSUM;
  9839. }
  9840. if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
  9841. tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
  9842. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
  9843. GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
  9844. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
  9845. tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
  9846. tp->pdev_peer == tp->pdev))
  9847. tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
  9848. if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
  9849. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  9850. tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
  9851. tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
  9852. } else {
  9853. tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
  9854. if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
  9855. ASIC_REV_5750 &&
  9856. tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
  9857. tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
  9858. }
  9859. }
  9860. if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
  9861. (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
  9862. tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
  9863. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
  9864. &pci_state_reg);
  9865. tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
  9866. if (tp->pcie_cap != 0) {
  9867. u16 lnkctl;
  9868. tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
  9869. pcie_set_readrq(tp->pdev, 4096);
  9870. pci_read_config_word(tp->pdev,
  9871. tp->pcie_cap + PCI_EXP_LNKCTL,
  9872. &lnkctl);
  9873. if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
  9874. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  9875. tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
  9876. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  9877. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  9878. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  9879. tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
  9880. }
  9881. } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
  9882. tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
  9883. } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
  9884. (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
  9885. tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
  9886. if (!tp->pcix_cap) {
  9887. printk(KERN_ERR PFX "Cannot find PCI-X "
  9888. "capability, aborting.\n");
  9889. return -EIO;
  9890. }
  9891. if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
  9892. tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
  9893. }
  9894. /* If we have an AMD 762 or VIA K8T800 chipset, write
  9895. * reordering to the mailbox registers done by the host
  9896. * controller can cause major troubles. We read back from
  9897. * every mailbox register write to force the writes to be
  9898. * posted to the chip in order.
  9899. */
  9900. if (pci_dev_present(write_reorder_chipsets) &&
  9901. !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
  9902. tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
  9903. pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
  9904. &tp->pci_cacheline_sz);
  9905. pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  9906. &tp->pci_lat_timer);
  9907. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
  9908. tp->pci_lat_timer < 64) {
  9909. tp->pci_lat_timer = 64;
  9910. pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  9911. tp->pci_lat_timer);
  9912. }
  9913. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
  9914. /* 5700 BX chips need to have their TX producer index
  9915. * mailboxes written twice to workaround a bug.
  9916. */
  9917. tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
  9918. /* If we are in PCI-X mode, enable register write workaround.
  9919. *
  9920. * The workaround is to use indirect register accesses
  9921. * for all chip writes not to mailbox registers.
  9922. */
  9923. if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
  9924. u32 pm_reg;
  9925. tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9926. /* The chip can have its power management PCI config
  9927. * space registers clobbered due to this bug.
  9928. * So explicitly force the chip into D0 here.
  9929. */
  9930. pci_read_config_dword(tp->pdev,
  9931. tp->pm_cap + PCI_PM_CTRL,
  9932. &pm_reg);
  9933. pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
  9934. pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
  9935. pci_write_config_dword(tp->pdev,
  9936. tp->pm_cap + PCI_PM_CTRL,
  9937. pm_reg);
  9938. /* Also, force SERR#/PERR# in PCI command. */
  9939. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  9940. pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
  9941. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  9942. }
  9943. }
  9944. if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
  9945. tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
  9946. if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
  9947. tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
  9948. /* Chip-specific fixup from Broadcom driver */
  9949. if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
  9950. (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
  9951. pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
  9952. pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
  9953. }
  9954. /* Default fast path register access methods */
  9955. tp->read32 = tg3_read32;
  9956. tp->write32 = tg3_write32;
  9957. tp->read32_mbox = tg3_read32;
  9958. tp->write32_mbox = tg3_write32;
  9959. tp->write32_tx_mbox = tg3_write32;
  9960. tp->write32_rx_mbox = tg3_write32;
  9961. /* Various workaround register access methods */
  9962. if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
  9963. tp->write32 = tg3_write_indirect_reg32;
  9964. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
  9965. ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
  9966. tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
  9967. /*
  9968. * Back to back register writes can cause problems on these
  9969. * chips, the workaround is to read back all reg writes
  9970. * except those to mailbox regs.
  9971. *
  9972. * See tg3_write_indirect_reg32().
  9973. */
  9974. tp->write32 = tg3_write_flush_reg32;
  9975. }
  9976. if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
  9977. (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
  9978. tp->write32_tx_mbox = tg3_write32_tx_mbox;
  9979. if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
  9980. tp->write32_rx_mbox = tg3_write_flush_reg32;
  9981. }
  9982. if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
  9983. tp->read32 = tg3_read_indirect_reg32;
  9984. tp->write32 = tg3_write_indirect_reg32;
  9985. tp->read32_mbox = tg3_read_indirect_mbox;
  9986. tp->write32_mbox = tg3_write_indirect_mbox;
  9987. tp->write32_tx_mbox = tg3_write_indirect_mbox;
  9988. tp->write32_rx_mbox = tg3_write_indirect_mbox;
  9989. iounmap(tp->regs);
  9990. tp->regs = NULL;
  9991. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  9992. pci_cmd &= ~PCI_COMMAND_MEMORY;
  9993. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  9994. }
  9995. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  9996. tp->read32_mbox = tg3_read32_mbox_5906;
  9997. tp->write32_mbox = tg3_write32_mbox_5906;
  9998. tp->write32_tx_mbox = tg3_write32_mbox_5906;
  9999. tp->write32_rx_mbox = tg3_write32_mbox_5906;
  10000. }
  10001. if (tp->write32 == tg3_write_indirect_reg32 ||
  10002. ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
  10003. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  10004. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
  10005. tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
  10006. /* Get eeprom hw config before calling tg3_set_power_state().
  10007. * In particular, the TG3_FLG2_IS_NIC flag must be
  10008. * determined before calling tg3_set_power_state() so that
  10009. * we know whether or not to switch out of Vaux power.
  10010. * When the flag is set, it means that GPIO1 is used for eeprom
  10011. * write protect and also implies that it is a LOM where GPIOs
  10012. * are not used to switch power.
  10013. */
  10014. tg3_get_eeprom_hw_cfg(tp);
  10015. if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
  10016. /* Allow reads and writes to the
  10017. * APE register and memory space.
  10018. */
  10019. pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
  10020. PCISTATE_ALLOW_APE_SHMEM_WR;
  10021. pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
  10022. pci_state_reg);
  10023. }
  10024. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  10025. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  10026. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  10027. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  10028. tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
  10029. /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
  10030. * GPIO1 driven high will bring 5700's external PHY out of reset.
  10031. * It is also used as eeprom write protect on LOMs.
  10032. */
  10033. tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
  10034. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
  10035. (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
  10036. tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
  10037. GRC_LCLCTRL_GPIO_OUTPUT1);
  10038. /* Unused GPIO3 must be driven as output on 5752 because there
  10039. * are no pull-up resistors on unused GPIO pins.
  10040. */
  10041. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
  10042. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
  10043. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  10044. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  10045. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
  10046. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
  10047. /* Turn off the debug UART. */
  10048. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
  10049. if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
  10050. /* Keep VMain power. */
  10051. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
  10052. GRC_LCLCTRL_GPIO_OUTPUT0;
  10053. }
  10054. /* Force the chip into D0. */
  10055. err = tg3_set_power_state(tp, PCI_D0);
  10056. if (err) {
  10057. printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
  10058. pci_name(tp->pdev));
  10059. return err;
  10060. }
  10061. /* Derive initial jumbo mode from MTU assigned in
  10062. * ether_setup() via the alloc_etherdev() call
  10063. */
  10064. if (tp->dev->mtu > ETH_DATA_LEN &&
  10065. !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
  10066. tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
  10067. /* Determine WakeOnLan speed to use. */
  10068. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  10069. tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
  10070. tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
  10071. tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
  10072. tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
  10073. } else {
  10074. tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
  10075. }
  10076. /* A few boards don't want Ethernet@WireSpeed phy feature */
  10077. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
  10078. ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
  10079. (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
  10080. (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
  10081. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
  10082. (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
  10083. tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
  10084. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
  10085. GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
  10086. tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
  10087. if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
  10088. tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
  10089. if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
  10090. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
  10091. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
  10092. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
  10093. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  10094. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
  10095. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  10096. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
  10097. if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
  10098. tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
  10099. tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
  10100. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
  10101. tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
  10102. } else
  10103. tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
  10104. }
  10105. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
  10106. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
  10107. tp->phy_otp = tg3_read_otp_phycfg(tp);
  10108. if (tp->phy_otp == 0)
  10109. tp->phy_otp = TG3_OTP_DEFAULT;
  10110. }
  10111. if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
  10112. tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
  10113. else
  10114. tp->mi_mode = MAC_MI_MODE_BASE;
  10115. tp->coalesce_mode = 0;
  10116. if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
  10117. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
  10118. tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
  10119. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  10120. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  10121. tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
  10122. err = tg3_mdio_init(tp);
  10123. if (err)
  10124. return err;
  10125. /* Initialize data/descriptor byte/word swapping. */
  10126. val = tr32(GRC_MODE);
  10127. val &= GRC_MODE_HOST_STACKUP;
  10128. tw32(GRC_MODE, val | tp->grc_mode);
  10129. tg3_switch_clocks(tp);
  10130. /* Clear this out for sanity. */
  10131. tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  10132. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
  10133. &pci_state_reg);
  10134. if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
  10135. (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
  10136. u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
  10137. if (chiprevid == CHIPREV_ID_5701_A0 ||
  10138. chiprevid == CHIPREV_ID_5701_B0 ||
  10139. chiprevid == CHIPREV_ID_5701_B2 ||
  10140. chiprevid == CHIPREV_ID_5701_B5) {
  10141. void __iomem *sram_base;
  10142. /* Write some dummy words into the SRAM status block
10143. * area and see if they read back correctly. If the return
  10144. * value is bad, force enable the PCIX workaround.
  10145. */
  10146. sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
  10147. writel(0x00000000, sram_base);
  10148. writel(0x00000000, sram_base + 4);
  10149. writel(0xffffffff, sram_base + 4);
  10150. if (readl(sram_base) != 0x00000000)
  10151. tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
  10152. }
  10153. }
  10154. udelay(50);
  10155. tg3_nvram_init(tp);
  10156. grc_misc_cfg = tr32(GRC_MISC_CFG);
  10157. grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
  10158. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  10159. (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
  10160. grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
  10161. tp->tg3_flags2 |= TG3_FLG2_IS_5788;
  10162. if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
  10163. (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
  10164. tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
  10165. if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
  10166. tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
  10167. HOSTCC_MODE_CLRTICK_TXBD);
  10168. tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
  10169. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  10170. tp->misc_host_ctrl);
  10171. }
  10172. /* Preserve the APE MAC_MODE bits */
  10173. if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
  10174. tp->mac_mode = tr32(MAC_MODE) |
  10175. MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
  10176. else
  10177. tp->mac_mode = TG3_DEF_MAC_MODE;
  10178. /* these are limited to 10/100 only */
  10179. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
  10180. (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
  10181. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  10182. tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
  10183. (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
  10184. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
  10185. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
  10186. (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
  10187. (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
  10188. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
  10189. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
  10190. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
  10191. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  10192. tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
  10193. err = tg3_phy_probe(tp);
  10194. if (err) {
  10195. printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
  10196. pci_name(tp->pdev), err);
  10197. /* ... but do not return immediately ... */
  10198. tg3_mdio_fini(tp);
  10199. }
  10200. tg3_read_partno(tp);
  10201. tg3_read_fw_ver(tp);
  10202. if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
  10203. tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
  10204. } else {
  10205. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
  10206. tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
  10207. else
  10208. tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
  10209. }
  10210. /* 5700 {AX,BX} chips have a broken status block link
  10211. * change bit implementation, so we must use the
  10212. * status register in those cases.
  10213. */
  10214. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
  10215. tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
  10216. else
  10217. tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10218. /* The led_ctrl is set during tg3_phy_probe; here we might
  10219. * have to force the link status polling mechanism based
  10220. * upon subsystem IDs.
  10221. */
  10222. if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
  10223. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
  10224. !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
  10225. tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
  10226. TG3_FLAG_USE_LINKCHG_REG);
  10227. }
  10228. /* For all SERDES we poll the MAC status register. */
  10229. if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
  10230. tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
  10231. else
  10232. tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
  10233. tp->rx_offset = NET_IP_ALIGN;
  10234. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
  10235. (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
  10236. tp->rx_offset = 0;
  10237. tp->rx_std_max_post = TG3_RX_RING_SIZE;
  10238. /* Increment the rx prod index on the rx std ring by at most
  10239. * 8 for these chips to workaround hw errata.
  10240. */
  10241. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
  10242. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  10243. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
  10244. tp->rx_std_max_post = 8;
  10245. if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
  10246. tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
  10247. PCIE_PWR_MGMT_L1_THRESH_MSK;
  10248. return err;
  10249. }
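/* On SPARC, prefer the "local-mac-address" property of the device's OF
 * node; fall back to the system IDPROM address if that is absent.
 */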
  10250. #ifdef CONFIG_SPARC
  10251. static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
  10252. {
  10253. struct net_device *dev = tp->dev;
  10254. struct pci_dev *pdev = tp->pdev;
  10255. struct device_node *dp = pci_device_to_OF_node(pdev);
  10256. const unsigned char *addr;
  10257. int len;
  10258. addr = of_get_property(dp, "local-mac-address", &len);
  10259. if (addr && len == 6) {
  10260. memcpy(dev->dev_addr, addr, 6);
  10261. memcpy(dev->perm_addr, dev->dev_addr, 6);
  10262. return 0;
  10263. }
  10264. return -ENODEV;
  10265. }
  10266. static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
  10267. {
  10268. struct net_device *dev = tp->dev;
  10269. memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
  10270. memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
  10271. return 0;
  10272. }
  10273. #endif
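/* MAC address lookup order: the SRAM address mailbox (guarded by the
 * 0x484b "HK" signature), then NVRAM at mac_offset (0x7c by default,
 * 0xcc when DUAL_MAC_CTRL_ID is set on 5704/5780-class devices, 0x10 on
 * 5906), and finally the MAC_ADDR_0 registers.
 */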
  10274. static int __devinit tg3_get_device_address(struct tg3 *tp)
  10275. {
  10276. struct net_device *dev = tp->dev;
  10277. u32 hi, lo, mac_offset;
  10278. int addr_ok = 0;
  10279. #ifdef CONFIG_SPARC
  10280. if (!tg3_get_macaddr_sparc(tp))
  10281. return 0;
  10282. #endif
  10283. mac_offset = 0x7c;
  10284. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
  10285. (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
  10286. if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
  10287. mac_offset = 0xcc;
  10288. if (tg3_nvram_lock(tp))
  10289. tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
  10290. else
  10291. tg3_nvram_unlock(tp);
  10292. }
  10293. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  10294. mac_offset = 0x10;
  10295. /* First try to get it from MAC address mailbox. */
  10296. tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
  10297. if ((hi >> 16) == 0x484b) {
  10298. dev->dev_addr[0] = (hi >> 8) & 0xff;
  10299. dev->dev_addr[1] = (hi >> 0) & 0xff;
  10300. tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
  10301. dev->dev_addr[2] = (lo >> 24) & 0xff;
  10302. dev->dev_addr[3] = (lo >> 16) & 0xff;
  10303. dev->dev_addr[4] = (lo >> 8) & 0xff;
  10304. dev->dev_addr[5] = (lo >> 0) & 0xff;
  10305. /* Some old bootcode may report a 0 MAC address in SRAM */
  10306. addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
  10307. }
  10308. if (!addr_ok) {
  10309. /* Next, try NVRAM. */
  10310. if (!tg3_nvram_read_swab(tp, mac_offset + 0, &hi) &&
  10311. !tg3_nvram_read_swab(tp, mac_offset + 4, &lo)) {
  10312. dev->dev_addr[0] = ((hi >> 16) & 0xff);
  10313. dev->dev_addr[1] = ((hi >> 24) & 0xff);
  10314. dev->dev_addr[2] = ((lo >> 0) & 0xff);
  10315. dev->dev_addr[3] = ((lo >> 8) & 0xff);
  10316. dev->dev_addr[4] = ((lo >> 16) & 0xff);
  10317. dev->dev_addr[5] = ((lo >> 24) & 0xff);
  10318. }
  10319. /* Finally just fetch it out of the MAC control regs. */
  10320. else {
  10321. hi = tr32(MAC_ADDR_0_HIGH);
  10322. lo = tr32(MAC_ADDR_0_LOW);
  10323. dev->dev_addr[5] = lo & 0xff;
  10324. dev->dev_addr[4] = (lo >> 8) & 0xff;
  10325. dev->dev_addr[3] = (lo >> 16) & 0xff;
  10326. dev->dev_addr[2] = (lo >> 24) & 0xff;
  10327. dev->dev_addr[1] = hi & 0xff;
  10328. dev->dev_addr[0] = (hi >> 8) & 0xff;
  10329. }
  10330. }
  10331. if (!is_valid_ether_addr(&dev->dev_addr[0])) {
  10332. #ifdef CONFIG_SPARC
  10333. if (!tg3_get_default_macaddr_sparc(tp))
  10334. return 0;
  10335. #endif
  10336. return -EINVAL;
  10337. }
  10338. memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
  10339. return 0;
  10340. }
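/* tg3_calc_dma_bndry() picks the DMA read/write boundary bits for
 * DMA_RWCTRL from the host cache-line size.  The boundary goal is
 * architecture dependent (multi-cacheline on PPC64/IA64/PARISC,
 * single-cacheline on SPARC64/Alpha, none otherwise) and the encoding
 * differs between conventional PCI, PCI-X and PCI Express.
 */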
  10341. #define BOUNDARY_SINGLE_CACHELINE 1
  10342. #define BOUNDARY_MULTI_CACHELINE 2
  10343. static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
  10344. {
  10345. int cacheline_size;
  10346. u8 byte;
  10347. int goal;
  10348. pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
  10349. if (byte == 0)
  10350. cacheline_size = 1024;
  10351. else
  10352. cacheline_size = (int) byte * 4;
  10353. /* On 5703 and later chips, the boundary bits have no
  10354. * effect.
  10355. */
  10356. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
  10357. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
  10358. !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
  10359. goto out;
  10360. #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
  10361. goal = BOUNDARY_MULTI_CACHELINE;
  10362. #else
  10363. #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
  10364. goal = BOUNDARY_SINGLE_CACHELINE;
  10365. #else
  10366. goal = 0;
  10367. #endif
  10368. #endif
  10369. if (!goal)
  10370. goto out;
  10371. /* PCI controllers on most RISC systems tend to disconnect
  10372. * when a device tries to burst across a cache-line boundary.
  10373. * Therefore, letting tg3 do so just wastes PCI bandwidth.
  10374. *
  10375. * Unfortunately, for PCI-E there are only limited
  10376. * write-side controls for this, and thus for reads
  10377. * we will still get the disconnects. We'll also waste
  10378. * these PCI cycles for both read and write for chips
  10379. * other than 5700 and 5701 which do not implement the
  10380. * boundary bits.
  10381. */
  10382. if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
  10383. !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
  10384. switch (cacheline_size) {
  10385. case 16:
  10386. case 32:
  10387. case 64:
  10388. case 128:
  10389. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  10390. val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
  10391. DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
  10392. } else {
  10393. val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
  10394. DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
  10395. }
  10396. break;
  10397. case 256:
  10398. val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
  10399. DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
  10400. break;
  10401. default:
  10402. val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
  10403. DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
  10404. break;
  10405. }
  10406. } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
  10407. switch (cacheline_size) {
  10408. case 16:
  10409. case 32:
  10410. case 64:
  10411. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  10412. val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
  10413. val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
  10414. break;
  10415. }
  10416. /* fallthrough */
  10417. case 128:
  10418. default:
  10419. val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
  10420. val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
  10421. break;
  10422. }
  10423. } else {
  10424. switch (cacheline_size) {
  10425. case 16:
  10426. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  10427. val |= (DMA_RWCTRL_READ_BNDRY_16 |
  10428. DMA_RWCTRL_WRITE_BNDRY_16);
  10429. break;
  10430. }
  10431. /* fallthrough */
  10432. case 32:
  10433. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  10434. val |= (DMA_RWCTRL_READ_BNDRY_32 |
  10435. DMA_RWCTRL_WRITE_BNDRY_32);
  10436. break;
  10437. }
  10438. /* fallthrough */
  10439. case 64:
  10440. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  10441. val |= (DMA_RWCTRL_READ_BNDRY_64 |
  10442. DMA_RWCTRL_WRITE_BNDRY_64);
  10443. break;
  10444. }
  10445. /* fallthrough */
  10446. case 128:
  10447. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  10448. val |= (DMA_RWCTRL_READ_BNDRY_128 |
  10449. DMA_RWCTRL_WRITE_BNDRY_128);
  10450. break;
  10451. }
  10452. /* fallthrough */
  10453. case 256:
  10454. val |= (DMA_RWCTRL_READ_BNDRY_256 |
  10455. DMA_RWCTRL_WRITE_BNDRY_256);
  10456. break;
  10457. case 512:
  10458. val |= (DMA_RWCTRL_READ_BNDRY_512 |
  10459. DMA_RWCTRL_WRITE_BNDRY_512);
  10460. break;
  10461. case 1024:
  10462. default:
  10463. val |= (DMA_RWCTRL_READ_BNDRY_1024 |
  10464. DMA_RWCTRL_WRITE_BNDRY_1024);
  10465. break;
  10466. }
  10467. }
  10468. out:
  10469. return val;
  10470. }
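/* Run one host<->NIC DMA transfer through the SRAM descriptor pool:
 * write a single descriptor into SRAM, kick the read or write DMA
 * engine, and poll the matching completion FIFO for up to ~4 ms
 * (40 polls of 100 usec).  Returns -ENODEV on timeout.
 */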
  10471. static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
  10472. {
  10473. struct tg3_internal_buffer_desc test_desc;
  10474. u32 sram_dma_descs;
  10475. int i, ret;
  10476. sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
  10477. tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
  10478. tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
  10479. tw32(RDMAC_STATUS, 0);
  10480. tw32(WDMAC_STATUS, 0);
  10481. tw32(BUFMGR_MODE, 0);
  10482. tw32(FTQ_RESET, 0);
  10483. test_desc.addr_hi = ((u64) buf_dma) >> 32;
  10484. test_desc.addr_lo = buf_dma & 0xffffffff;
  10485. test_desc.nic_mbuf = 0x00002100;
  10486. test_desc.len = size;
  10487. /*
10488. * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
  10489. * the *second* time the tg3 driver was getting loaded after an
  10490. * initial scan.
  10491. *
  10492. * Broadcom tells me:
  10493. * ...the DMA engine is connected to the GRC block and a DMA
  10494. * reset may affect the GRC block in some unpredictable way...
  10495. * The behavior of resets to individual blocks has not been tested.
  10496. *
  10497. * Broadcom noted the GRC reset will also reset all sub-components.
  10498. */
  10499. if (to_device) {
  10500. test_desc.cqid_sqid = (13 << 8) | 2;
  10501. tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
  10502. udelay(40);
  10503. } else {
  10504. test_desc.cqid_sqid = (16 << 8) | 7;
  10505. tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
  10506. udelay(40);
  10507. }
  10508. test_desc.flags = 0x00000005;
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	ret = -ENODEV;
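
	/* Poll the completion FIFO up to 40 times at 100us intervals
	 * (roughly 4ms total) for the descriptor queued above; leave
	 * ret at -ENODEV if it never completes.
	 */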
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000
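
/* Probe for host/bridge DMA quirks.  Program an initial DMA_RW_CTRL value
 * based on bus type and chip revision, then, on 5700/5701 only, run a
 * write/read DMA loop against a TEST_BUFFER_SIZE test buffer and fall back
 * to a 16-byte write boundary if the data comes back corrupted.
 */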
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
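
	/* Layer the bus- and ASIC-specific read/write watermark bits on top
	 * of the boundary value computed above; the constants below are
	 * per-chip tuning values.
	 */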
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
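
/* Establish the default link configuration: autoneg enabled, all
 * 10/100/1000 half- and full-duplex modes advertised, and forced
 * speed/duplex left at their INVALID sentinels.
 */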
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
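
/* Select the buffer-manager MBUF and DMA watermarks.  5705-class chips
 * use their own defaults, with a further override for the 5906 and
 * 5780-style jumbo values; older chips use the original standard and
 * jumbo watermarks.
 */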
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;
		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400:	return "5400";
	case PHY_ID_BCM5401:	return "5401";
	case PHY_ID_BCM5411:	return "5411";
	case PHY_ID_BCM5701:	return "5701";
	case PHY_ID_BCM5703:	return "5703";
	case PHY_ID_BCM5704:	return "5704";
	case PHY_ID_BCM5705:	return "5705";
	case PHY_ID_BCM5750:	return "5750";
	case PHY_ID_BCM5752:	return "5752";
	case PHY_ID_BCM5714:	return "5714";
	case PHY_ID_BCM5780:	return "5780";
	case PHY_ID_BCM5755:	return "5755";
	case PHY_ID_BCM5787:	return "5787";
	case PHY_ID_BCM5784:	return "5784";
	case PHY_ID_BCM5756:	return "5722/5756";
	case PHY_ID_BCM5906:	return "5906";
	case PHY_ID_BCM5761:	return "5761";
	case PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
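
/* Locate the other PCI function of a dual-port device (e.g. a two-port
 * 5704) by scanning the remaining functions in our slot; fall back to
 * tp->pdev itself when the device is configured single-port.
 */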
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
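
/* Fill tp->coal with the default interrupt coalescing parameters that
 * are reported to, and adjusted through, the ethtool coalesce interface.
 */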
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
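
/* Identical to tg3_netdev_ops except that the transmit hook points at
 * tg3_start_xmit_dma_bug; tg3_init_one() below selects this table for
 * chip revisions that are neither 5755-class nor 5906.
 */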
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
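
/* PCI probe routine.  Enables the device, maps its registers, derives the
 * chip invariants and DMA mask, runs the DMA engine test, and finally
 * registers the net_device with the networking core.
 */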
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
		   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		if (dev->features & NETIF_F_IP_CSUM)
			dev->features |= NETIF_F_TSO;
		if ((dev->features & NETIF_F_IPV6_CSUM) &&
		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			dev->features |= NETIF_F_TSO_ECN;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_fw;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_fw;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down.  The DMA self test will enable the WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_bus_string(tp, str),
	       dev->dev_addr);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       tp->dev->name,
		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
	else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	       (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_fw:
	if (tp->fw)
		release_firmware(tp->fw);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
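
/* Undo tg3_init_one(): release firmware, flush deferred work, tear down
 * the PHY/MDIO layer if phylib is in use, unregister the net device and
 * unmap/release the PCI resources.
 */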
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
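
/* PCI suspend hook: save config space, quiesce the interface if it is
 * running, halt the chip and enter the requested low-power state; on
 * failure, restart the hardware so the device remains usable.
 */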
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
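
/* PCI resume hook: restore config space, bring the chip back to D0 and,
 * if the interface was running, restart the hardware and timers.
 */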
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;

	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);