tg3.c (451 KB, 14,308 lines)
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
71531715417155171561715717158171591716017161171621716317164171651716617167171681716917170171711717217173171741717517176171771717817179171801718117182171831718417185171861718717188171891719017191171921719317194171951719617197171981719917200172011720217203172041720517206172071720817209172101721117212172131721417215172161721717218172191722017221172221722317224172251722617227172281722917230172311723217233172341723517236172371723817239172401724117242172431724417245172461724717248172491725017251172521725317254172551725617257172581725917260172611726217263172641726517266172671726817269172701727117272172731727417275172761727717278172791728017281172821728317284172851728617287172881728917290172911729217293172941729517296172971729817299173001730117302173031730417305173061730717308173091731017311173121731317314173151731617317173181731917320173211732217323173241732517326173271732817329173301733117332173331733417335173361733717338173391734017341173421734317344173451734617347173481734917350173511735217353173541735517356173571735817359173601736117362173631736417365173661736717368173691737017371173721737317374173751737617377173781737917380173811738217383173841738517386173871738817389173901739117392173931739417395173961739717398173991740017401174021740317404174051740617407174081740917410174111741217413174141741517416174171741817419174201742117422174231742417425174261742717428174291743017431174321743317434174351743617437174381743917440174411744217443174441744517446174471744817449174501745117452174531745417455174561745717458174591746017461174621746317464174651746617467174681746917470174711747217473174741747517476174771747817479174801748117482174831748417485174861748717488174891749017491174921749317494174951749617497174981749917500175011750217503175041750517506175071750817509175101751117512175131751417515175161751717518175191752017521175221752317524175251752617527175281752917530175311753217533175341753517536175371753817539175401754117542175431754417545175461754717548175491755017551175521755317554175551755617557175581755917560175611756217563175641756517566175671756817569175701757117572175731757417575175761757717578175791758017581175821758317584175851758617587175881758917590175911759217593175941759517596175971759817599176001760117602176031760417605176061760717608176091761017611176121761317614176151761617617176181761917620176211762217623176241762517626176271762817629176301763117632176331763417635176361763717638176391764017641176421764317644176451764617647176481764917650176511765217653176541765517656176571765817659176601766117662176631766417665
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
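/* For example, tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. a test_bit()
 * on the flag bitmap; the _set and _clear variants work the same way.
 */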
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		130
#define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV | \
	 NETIF_MSG_PROBE | \
	 NETIF_MSG_LINK | \
	 NETIF_MSG_TIMER | \
	 NETIF_MSG_IFDOWN | \
	 NETIF_MSG_IFUP | \
	 NETIF_MSG_RX_ERR | \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp) \
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
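/* Since TG3_TX_RING_SIZE is a power of two, the '& (TG3_TX_RING_SIZE - 1)'
 * mask above is equivalent to '% TG3_TX_RING_SIZE' but needs no divide:
 * e.g. NEXT_TX(510) == 511 and NEXT_TX(511) wraps back to 0.
 */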
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
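/* With the sizes above, the DMA mapping lengths work out to
 * 1536 + 64 = 1600 bytes for a standard ring buffer and
 * 9046 + 64 = 9110 bytes for a jumbo ring buffer.
 */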
#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
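/* E.g. if tx_pending is at its default of TG3_DEF_TX_RING_PENDING (511),
 * the queue is woken once 511 / 4 = 127 descriptors are free again.
 */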
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },
	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
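/* The code below always goes through the tp->read32/tp->write32 style
 * function pointers (via the tw32/tr32 macros above), so the same call
 * sites work whether the chip is programmed with direct MMIO or, on chips
 * needing the workarounds, through the indirect config-space helpers
 * defined earlier.
 */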
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
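/* Poll TG3_APE_EVENT_STATUS until the APE clears the EVENT_PENDING bit.
 * Returns nonzero if the bit is still set after timeout_us microseconds
 * (i.e. the wait timed out), zero on success.
 */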
  705. static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
  706. {
  707. u32 i, apedata;
  708. for (i = 0; i < timeout_us / 10; i++) {
  709. apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
  710. if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
  711. break;
  712. udelay(10);
  713. }
  714. return i == timeout_us / 10;
  715. }
  716. static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
  717. u32 len)
  718. {
  719. int err;
  720. u32 i, bufoff, msgoff, maxlen, apedata;
  721. if (!tg3_flag(tp, APE_HAS_NCSI))
  722. return 0;
  723. apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
  724. if (apedata != APE_SEG_SIG_MAGIC)
  725. return -ENODEV;
  726. apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
  727. if (!(apedata & APE_FW_STATUS_READY))
  728. return -EAGAIN;
  729. bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
  730. TG3_APE_SHMEM_BASE;
  731. msgoff = bufoff + 2 * sizeof(u32);
  732. maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
  733. while (len) {
  734. u32 length;
  735. /* Cap xfer sizes to scratchpad limits. */
  736. length = (len > maxlen) ? maxlen : len;
  737. len -= length;
  738. apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
  739. if (!(apedata & APE_FW_STATUS_READY))
  740. return -EAGAIN;
  741. /* Wait for up to 1 msec for APE to service previous event. */
  742. err = tg3_ape_event_lock(tp, 1000);
  743. if (err)
  744. return err;
  745. apedata = APE_EVENT_STATUS_DRIVER_EVNT |
  746. APE_EVENT_STATUS_SCRTCHPD_READ |
  747. APE_EVENT_STATUS_EVENT_PENDING;
  748. tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
  749. tg3_ape_write32(tp, bufoff, base_off);
  750. tg3_ape_write32(tp, bufoff + sizeof(u32), length);
  751. tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
  752. tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
  753. base_off += length;
  754. if (tg3_ape_wait_for_event(tp, 30000))
  755. return -EAGAIN;
  756. for (i = 0; length; i += 4, length -= 4) {
  757. u32 val = tg3_ape_read32(tp, msgoff + i);
  758. memcpy(data, &val, sizeof(u32));
  759. data++;
  760. }
  761. }
  762. return 0;
  763. }
  764. static int tg3_ape_send_event(struct tg3 *tp, u32 event)
  765. {
  766. int err;
  767. u32 apedata;
  768. apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
  769. if (apedata != APE_SEG_SIG_MAGIC)
  770. return -EAGAIN;
  771. apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
  772. if (!(apedata & APE_FW_STATUS_READY))
  773. return -EAGAIN;
  774. /* Wait for up to 1 millisecond for APE to service previous event. */
  775. err = tg3_ape_event_lock(tp, 1000);
  776. if (err)
  777. return err;
  778. tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
  779. event | APE_EVENT_STATUS_EVENT_PENDING);
  780. tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
  781. tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
  782. return 0;
  783. }
  784. static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
  785. {
  786. u32 event;
  787. u32 apedata;
  788. if (!tg3_flag(tp, ENABLE_APE))
  789. return;
  790. switch (kind) {
  791. case RESET_KIND_INIT:
  792. tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
  793. APE_HOST_SEG_SIG_MAGIC);
  794. tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
  795. APE_HOST_SEG_LEN_MAGIC);
  796. apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
  797. tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
  798. tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
  799. APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
  800. tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
  801. APE_HOST_BEHAV_NO_PHYLOCK);
  802. tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
  803. TG3_APE_HOST_DRVR_STATE_START);
  804. event = APE_EVENT_STATUS_STATE_START;
  805. break;
  806. case RESET_KIND_SHUTDOWN:
  807. /* With the interface we are currently using,
  808. * APE does not track driver state. Wiping
  809. * out the HOST SEGMENT SIGNATURE forces
  810. * the APE to assume OS absent status.
  811. */
  812. tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
  813. if (device_may_wakeup(&tp->pdev->dev) &&
  814. tg3_flag(tp, WOL_ENABLE)) {
  815. tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
  816. TG3_APE_HOST_WOL_SPEED_AUTO);
  817. apedata = TG3_APE_HOST_DRVR_STATE_WOL;
  818. } else
  819. apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
  820. tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
  821. event = APE_EVENT_STATUS_STATE_UNLOAD;
  822. break;
  823. case RESET_KIND_SUSPEND:
  824. event = APE_EVENT_STATUS_STATE_SUSPEND;
  825. break;
  826. default:
  827. return;
  828. }
  829. event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
  830. tg3_ape_send_event(tp, event);
  831. }
  832. static void tg3_disable_ints(struct tg3 *tp)
  833. {
  834. int i;
  835. tw32(TG3PCI_MISC_HOST_CTRL,
  836. (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
  837. for (i = 0; i < tp->irq_max; i++)
  838. tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
  839. }
  840. static void tg3_enable_ints(struct tg3 *tp)
  841. {
  842. int i;
  843. tp->irq_sync = 0;
  844. wmb();
  845. tw32(TG3PCI_MISC_HOST_CTRL,
  846. (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
  847. tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
  848. for (i = 0; i < tp->irq_cnt; i++) {
  849. struct tg3_napi *tnapi = &tp->napi[i];
  850. tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
  851. if (tg3_flag(tp, 1SHOT_MSI))
  852. tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
  853. tp->coal_now |= tnapi->coal_now;
  854. }
  855. /* Force an initial interrupt */
  856. if (!tg3_flag(tp, TAGGED_STATUS) &&
  857. (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
  858. tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
  859. else
  860. tw32(HOSTCC_MODE, tp->coal_now);
  861. tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
  862. }
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
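/* Program TG3PCI_CLOCK_CTRL for the chip's preferred core clock source.
 * CPMU-equipped and 5780-class chips manage clocking themselves and are
 * skipped.
 */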
  900. static void tg3_switch_clocks(struct tg3 *tp)
  901. {
  902. u32 clock_ctrl;
  903. u32 orig_clock_ctrl;
  904. if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
  905. return;
  906. clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
  907. orig_clock_ctrl = clock_ctrl;
  908. clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
  909. CLOCK_CTRL_CLKRUN_OENABLE |
  910. 0x1f);
  911. tp->pci_clock_ctrl = clock_ctrl;
  912. if (tg3_flag(tp, 5705_PLUS)) {
  913. if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
  914. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  915. clock_ctrl | CLOCK_CTRL_625_CORE, 40);
  916. }
  917. } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
  918. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  919. clock_ctrl |
  920. (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
  921. 40);
  922. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  923. clock_ctrl | (CLOCK_CTRL_ALTCLK),
  924. 40);
  925. }
  926. tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
  927. }
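/* MII management access.  __tg3_readphy()/__tg3_writephy() drive the
 * MAC_MI_COM register directly, temporarily pausing auto-polling and taking
 * the PHY APE lock around the transaction.  A typical caller looks like
 * this (illustration only):
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &val))
 *		link_ok = !!(val & BMSR_LSTATUS);
 *
 * Both helpers return 0 on success and -EBUSY if the MI interface stays
 * busy for PHY_BUSY_LOOPS iterations.
 */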
  928. #define PHY_BUSY_LOOPS 5000
  929. static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
  930. u32 *val)
  931. {
  932. u32 frame_val;
  933. unsigned int loops;
  934. int ret;
  935. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  936. tw32_f(MAC_MI_MODE,
  937. (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
  938. udelay(80);
  939. }
  940. tg3_ape_lock(tp, tp->phy_ape_lock);
  941. *val = 0x0;
  942. frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
  943. MI_COM_PHY_ADDR_MASK);
  944. frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
  945. MI_COM_REG_ADDR_MASK);
  946. frame_val |= (MI_COM_CMD_READ | MI_COM_START);
  947. tw32_f(MAC_MI_COM, frame_val);
  948. loops = PHY_BUSY_LOOPS;
  949. while (loops != 0) {
  950. udelay(10);
  951. frame_val = tr32(MAC_MI_COM);
  952. if ((frame_val & MI_COM_BUSY) == 0) {
  953. udelay(5);
  954. frame_val = tr32(MAC_MI_COM);
  955. break;
  956. }
  957. loops -= 1;
  958. }
  959. ret = -EBUSY;
  960. if (loops != 0) {
  961. *val = frame_val & MI_COM_DATA_MASK;
  962. ret = 0;
  963. }
  964. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  965. tw32_f(MAC_MI_MODE, tp->mi_mode);
  966. udelay(80);
  967. }
  968. tg3_ape_unlock(tp, tp->phy_ape_lock);
  969. return ret;
  970. }
  971. static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
  972. {
  973. return __tg3_readphy(tp, tp->phy_addr, reg, val);
  974. }
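/* Write val to PHY register reg at phy_addr over the MI interface.
 * Returns 0 on success or -EBUSY on timeout.  Writes to MII_CTRL1000 and
 * MII_TG3_AUX_CTRL are silently ignored on FET-style PHYs, which lack
 * those registers.
 */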
  975. static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
  976. u32 val)
  977. {
  978. u32 frame_val;
  979. unsigned int loops;
  980. int ret;
  981. if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
  982. (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
  983. return 0;
  984. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  985. tw32_f(MAC_MI_MODE,
  986. (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
  987. udelay(80);
  988. }
  989. tg3_ape_lock(tp, tp->phy_ape_lock);
  990. frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
  991. MI_COM_PHY_ADDR_MASK);
  992. frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
  993. MI_COM_REG_ADDR_MASK);
  994. frame_val |= (val & MI_COM_DATA_MASK);
  995. frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
  996. tw32_f(MAC_MI_COM, frame_val);
  997. loops = PHY_BUSY_LOOPS;
  998. while (loops != 0) {
  999. udelay(10);
  1000. frame_val = tr32(MAC_MI_COM);
  1001. if ((frame_val & MI_COM_BUSY) == 0) {
  1002. udelay(5);
  1003. frame_val = tr32(MAC_MI_COM);
  1004. break;
  1005. }
  1006. loops -= 1;
  1007. }
  1008. ret = -EBUSY;
  1009. if (loops != 0)
  1010. ret = 0;
  1011. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  1012. tw32_f(MAC_MI_MODE, tp->mi_mode);
  1013. udelay(80);
  1014. }
  1015. tg3_ape_unlock(tp, tp->phy_ape_lock);
  1016. return ret;
  1017. }
  1018. static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
  1019. {
  1020. return __tg3_writephy(tp, tp->phy_addr, reg, val);
  1021. }
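/* Clause 45 access through the clause 22 MMD indirection registers: the
 * devad is latched via MII_TG3_MMD_CTRL, the register address via
 * MII_TG3_MMD_ADDRESS, and the data phase uses the no-post-increment mode.
 */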
  1022. static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
  1023. {
  1024. int err;
  1025. err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
  1026. if (err)
  1027. goto done;
  1028. err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
  1029. if (err)
  1030. goto done;
  1031. err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
  1032. MII_TG3_MMD_CTRL_DATA_NOINC | devad);
  1033. if (err)
  1034. goto done;
  1035. err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
  1036. done:
  1037. return err;
  1038. }
  1039. static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
  1040. {
  1041. int err;
  1042. err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
  1043. if (err)
  1044. goto done;
  1045. err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
  1046. if (err)
  1047. goto done;
  1048. err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
  1049. MII_TG3_MMD_CTRL_DATA_NOINC | devad);
  1050. if (err)
  1051. goto done;
  1052. err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
  1053. done:
  1054. return err;
  1055. }
  1056. static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
  1057. {
  1058. int err;
  1059. err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
  1060. if (!err)
  1061. err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
  1062. return err;
  1063. }
  1064. static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
  1065. {
  1066. int err;
  1067. err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
  1068. if (!err)
  1069. err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
  1070. return err;
  1071. }
  1072. static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
  1073. {
  1074. int err;
  1075. err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
  1076. (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
  1077. MII_TG3_AUXCTL_SHDWSEL_MISC);
  1078. if (!err)
  1079. err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
  1080. return err;
  1081. }
  1082. static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
  1083. {
  1084. if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
  1085. set |= MII_TG3_AUXCTL_MISC_WREN;
  1086. return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
  1087. }
  1088. static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
  1089. {
  1090. u32 val;
  1091. int err;
  1092. err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
  1093. if (err)
  1094. return err;
  1095. if (enable)
  1096. val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
  1097. else
  1098. val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
  1099. err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
  1100. val | MII_TG3_AUXCTL_ACTL_TX_6DB);
  1101. return err;
  1102. }
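/* Issue a BMCR software reset and poll (up to 5000 iterations) for the
 * self-clearing BMCR_RESET bit.  Returns 0 on success, -EBUSY on any MI
 * error or timeout.
 */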
  1103. static int tg3_bmcr_reset(struct tg3 *tp)
  1104. {
  1105. u32 phy_control;
  1106. int limit, err;
  1107. /* OK, reset it, and poll the BMCR_RESET bit until it
  1108. * clears or we time out.
  1109. */
  1110. phy_control = BMCR_RESET;
  1111. err = tg3_writephy(tp, MII_BMCR, phy_control);
  1112. if (err != 0)
  1113. return -EBUSY;
  1114. limit = 5000;
  1115. while (limit--) {
  1116. err = tg3_readphy(tp, MII_BMCR, &phy_control);
  1117. if (err != 0)
  1118. return -EBUSY;
  1119. if ((phy_control & BMCR_RESET) == 0) {
  1120. udelay(40);
  1121. break;
  1122. }
  1123. udelay(10);
  1124. }
  1125. if (limit < 0)
  1126. return -EBUSY;
  1127. return 0;
  1128. }
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
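/* Match the 5785 MAC's PHY configuration (LED modes, RGMII in-band status
 * and clock timeouts) to the particular PHY found during the mdio bus probe.
 */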
  1153. static void tg3_mdio_config_5785(struct tg3 *tp)
  1154. {
  1155. u32 val;
  1156. struct phy_device *phydev;
  1157. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  1158. switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
  1159. case PHY_ID_BCM50610:
  1160. case PHY_ID_BCM50610M:
  1161. val = MAC_PHYCFG2_50610_LED_MODES;
  1162. break;
  1163. case PHY_ID_BCMAC131:
  1164. val = MAC_PHYCFG2_AC131_LED_MODES;
  1165. break;
  1166. case PHY_ID_RTL8211C:
  1167. val = MAC_PHYCFG2_RTL8211C_LED_MODES;
  1168. break;
  1169. case PHY_ID_RTL8201E:
  1170. val = MAC_PHYCFG2_RTL8201E_LED_MODES;
  1171. break;
  1172. default:
  1173. return;
  1174. }
  1175. if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
  1176. tw32(MAC_PHYCFG2, val);
  1177. val = tr32(MAC_PHYCFG1);
  1178. val &= ~(MAC_PHYCFG1_RGMII_INT |
  1179. MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
  1180. val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
  1181. tw32(MAC_PHYCFG1, val);
  1182. return;
  1183. }
  1184. if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
  1185. val |= MAC_PHYCFG2_EMODE_MASK_MASK |
  1186. MAC_PHYCFG2_FMODE_MASK_MASK |
  1187. MAC_PHYCFG2_GMODE_MASK_MASK |
  1188. MAC_PHYCFG2_ACT_MASK_MASK |
  1189. MAC_PHYCFG2_QUAL_MASK_MASK |
  1190. MAC_PHYCFG2_INBAND_ENABLE;
  1191. tw32(MAC_PHYCFG2, val);
  1192. val = tr32(MAC_PHYCFG1);
  1193. val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
  1194. MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
  1195. if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
  1196. if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
  1197. val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
  1198. if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
  1199. val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
  1200. }
  1201. val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
  1202. MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
  1203. tw32(MAC_PHYCFG1, val);
  1204. val = tr32(MAC_EXT_RGMII_MODE);
  1205. val &= ~(MAC_RGMII_MODE_RX_INT_B |
  1206. MAC_RGMII_MODE_RX_QUALITY |
  1207. MAC_RGMII_MODE_RX_ACTIVITY |
  1208. MAC_RGMII_MODE_RX_ENG_DET |
  1209. MAC_RGMII_MODE_TX_ENABLE |
  1210. MAC_RGMII_MODE_TX_LOWPWR |
  1211. MAC_RGMII_MODE_TX_RESET);
  1212. if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
  1213. if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
  1214. val |= MAC_RGMII_MODE_RX_INT_B |
  1215. MAC_RGMII_MODE_RX_QUALITY |
  1216. MAC_RGMII_MODE_RX_ACTIVITY |
  1217. MAC_RGMII_MODE_RX_ENG_DET;
  1218. if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
  1219. val |= MAC_RGMII_MODE_TX_ENABLE |
  1220. MAC_RGMII_MODE_TX_LOWPWR |
  1221. MAC_RGMII_MODE_TX_RESET;
  1222. }
  1223. tw32(MAC_EXT_RGMII_MODE, val);
  1224. }
  1225. static void tg3_mdio_start(struct tg3 *tp)
  1226. {
  1227. tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
  1228. tw32_f(MAC_MI_MODE, tp->mi_mode);
  1229. udelay(80);
  1230. if (tg3_flag(tp, MDIOBUS_INITED) &&
  1231. tg3_asic_rev(tp) == ASIC_REV_5785)
  1232. tg3_mdio_config_5785(tp);
  1233. }
  1234. static int tg3_mdio_init(struct tg3 *tp)
  1235. {
  1236. int i;
  1237. u32 reg;
  1238. struct phy_device *phydev;
  1239. if (tg3_flag(tp, 5717_PLUS)) {
  1240. u32 is_serdes;
  1241. tp->phy_addr = tp->pci_fn + 1;
  1242. if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
  1243. is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
  1244. else
  1245. is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
  1246. TG3_CPMU_PHY_STRAP_IS_SERDES;
  1247. if (is_serdes)
  1248. tp->phy_addr += 7;
  1249. } else
  1250. tp->phy_addr = TG3_PHY_MII_ADDR;
  1251. tg3_mdio_start(tp);
  1252. if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
  1253. return 0;
  1254. tp->mdio_bus = mdiobus_alloc();
  1255. if (tp->mdio_bus == NULL)
  1256. return -ENOMEM;
  1257. tp->mdio_bus->name = "tg3 mdio bus";
  1258. snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
  1259. (tp->pdev->bus->number << 8) | tp->pdev->devfn);
  1260. tp->mdio_bus->priv = tp;
  1261. tp->mdio_bus->parent = &tp->pdev->dev;
  1262. tp->mdio_bus->read = &tg3_mdio_read;
  1263. tp->mdio_bus->write = &tg3_mdio_write;
  1264. tp->mdio_bus->reset = &tg3_mdio_reset;
  1265. tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
  1266. tp->mdio_bus->irq = &tp->mdio_irq[0];
  1267. for (i = 0; i < PHY_MAX_ADDR; i++)
  1268. tp->mdio_bus->irq[i] = PHY_POLL;
	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
  1274. if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
  1275. tg3_bmcr_reset(tp);
  1276. i = mdiobus_register(tp->mdio_bus);
  1277. if (i) {
  1278. dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
  1279. mdiobus_free(tp->mdio_bus);
  1280. return i;
  1281. }
  1282. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  1283. if (!phydev || !phydev->drv) {
  1284. dev_warn(&tp->pdev->dev, "No PHY devices\n");
  1285. mdiobus_unregister(tp->mdio_bus);
  1286. mdiobus_free(tp->mdio_bus);
  1287. return -ENODEV;
  1288. }
  1289. switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
  1290. case PHY_ID_BCM57780:
  1291. phydev->interface = PHY_INTERFACE_MODE_GMII;
  1292. phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
  1293. break;
  1294. case PHY_ID_BCM50610:
  1295. case PHY_ID_BCM50610M:
  1296. phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
  1297. PHY_BRCM_RX_REFCLK_UNUSED |
  1298. PHY_BRCM_DIS_TXCRXC_NOENRGY |
  1299. PHY_BRCM_AUTO_PWRDWN_ENABLE;
  1300. if (tg3_flag(tp, RGMII_INBAND_DISABLE))
  1301. phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
  1302. if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
  1303. phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
  1304. if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
  1305. phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
  1306. /* fallthru */
  1307. case PHY_ID_RTL8211C:
  1308. phydev->interface = PHY_INTERFACE_MODE_RGMII;
  1309. break;
  1310. case PHY_ID_RTL8201E:
  1311. case PHY_ID_BCMAC131:
  1312. phydev->interface = PHY_INTERFACE_MODE_MII;
  1313. phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
  1314. tp->phy_flags |= TG3_PHYFLG_IS_FET;
  1315. break;
  1316. }
  1317. tg3_flag_set(tp, MDIOBUS_INITED);
  1318. if (tg3_asic_rev(tp) == ASIC_REV_5785)
  1319. tg3_mdio_config_5785(tp);
  1320. return 0;
  1321. }
  1322. static void tg3_mdio_fini(struct tg3 *tp)
  1323. {
  1324. if (tg3_flag(tp, MDIOBUS_INITED)) {
  1325. tg3_flag_clear(tp, MDIOBUS_INITED);
  1326. mdiobus_unregister(tp->mdio_bus);
  1327. mdiobus_free(tp->mdio_bus);
  1328. }
  1329. }
  1330. /* tp->lock is held. */
  1331. static inline void tg3_generate_fw_event(struct tg3 *tp)
  1332. {
  1333. u32 val;
  1334. val = tr32(GRC_RX_CPU_EVENT);
  1335. val |= GRC_RX_CPU_DRIVER_EVENT;
  1336. tw32_f(GRC_RX_CPU_EVENT, val);
  1337. tp->last_event_jiffies = jiffies;
  1338. }
  1339. #define TG3_FW_EVENT_TIMEOUT_USEC 2500
  1340. /* tp->lock is held. */
  1341. static void tg3_wait_for_event_ack(struct tg3 *tp)
  1342. {
  1343. int i;
  1344. unsigned int delay_cnt;
  1345. long time_remain;
  1346. /* If enough time has passed, no wait is necessary. */
  1347. time_remain = (long)(tp->last_event_jiffies + 1 +
  1348. usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
  1349. (long)jiffies;
  1350. if (time_remain < 0)
  1351. return;
  1352. /* Check if we can shorten the wait time. */
  1353. delay_cnt = jiffies_to_usecs(time_remain);
  1354. if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
  1355. delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
  1356. delay_cnt = (delay_cnt >> 3) + 1;
  1357. for (i = 0; i < delay_cnt; i++) {
  1358. if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
  1359. break;
  1360. udelay(8);
  1361. }
  1362. }
  1363. /* tp->lock is held. */
  1364. static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
  1365. {
  1366. u32 reg, val;
  1367. val = 0;
  1368. if (!tg3_readphy(tp, MII_BMCR, &reg))
  1369. val = reg << 16;
  1370. if (!tg3_readphy(tp, MII_BMSR, &reg))
  1371. val |= (reg & 0xffff);
  1372. *data++ = val;
  1373. val = 0;
  1374. if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
  1375. val = reg << 16;
  1376. if (!tg3_readphy(tp, MII_LPA, &reg))
  1377. val |= (reg & 0xffff);
  1378. *data++ = val;
  1379. val = 0;
  1380. if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
  1381. if (!tg3_readphy(tp, MII_CTRL1000, &reg))
  1382. val = reg << 16;
  1383. if (!tg3_readphy(tp, MII_STAT1000, &reg))
  1384. val |= (reg & 0xffff);
  1385. }
  1386. *data++ = val;
  1387. if (!tg3_readphy(tp, MII_PHYADDR, &reg))
  1388. val = reg << 16;
  1389. else
  1390. val = 0;
  1391. *data++ = val;
  1392. }
  1393. /* tp->lock is held. */
  1394. static void tg3_ump_link_report(struct tg3 *tp)
  1395. {
  1396. u32 data[4];
  1397. if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
  1398. return;
  1399. tg3_phy_gather_ump_data(tp, data);
  1400. tg3_wait_for_event_ack(tp);
  1401. tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
  1402. tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
  1403. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
  1404. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
  1405. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
  1406. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
  1407. tg3_generate_fw_event(tp);
  1408. }
  1409. /* tp->lock is held. */
  1410. static void tg3_stop_fw(struct tg3 *tp)
  1411. {
  1412. if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
  1413. /* Wait for RX cpu to ACK the previous event. */
  1414. tg3_wait_for_event_ack(tp);
  1415. tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
  1416. tg3_generate_fw_event(tp);
  1417. /* Wait for RX cpu to ACK this event. */
  1418. tg3_wait_for_event_ack(tp);
  1419. }
  1420. }
  1421. /* tp->lock is held. */
  1422. static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
  1423. {
  1424. tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
  1425. NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
  1426. if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
  1427. switch (kind) {
  1428. case RESET_KIND_INIT:
  1429. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  1430. DRV_STATE_START);
  1431. break;
  1432. case RESET_KIND_SHUTDOWN:
  1433. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  1434. DRV_STATE_UNLOAD);
  1435. break;
  1436. case RESET_KIND_SUSPEND:
  1437. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  1438. DRV_STATE_SUSPEND);
  1439. break;
  1440. default:
  1441. break;
  1442. }
  1443. }
  1444. if (kind == RESET_KIND_INIT ||
  1445. kind == RESET_KIND_SUSPEND)
  1446. tg3_ape_driver_state_change(tp, kind);
  1447. }
  1448. /* tp->lock is held. */
  1449. static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
  1450. {
  1451. if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
  1452. switch (kind) {
  1453. case RESET_KIND_INIT:
  1454. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  1455. DRV_STATE_START_DONE);
  1456. break;
  1457. case RESET_KIND_SHUTDOWN:
  1458. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  1459. DRV_STATE_UNLOAD_DONE);
  1460. break;
  1461. default:
  1462. break;
  1463. }
  1464. }
  1465. if (kind == RESET_KIND_SHUTDOWN)
  1466. tg3_ape_driver_state_change(tp, kind);
  1467. }
  1468. /* tp->lock is held. */
  1469. static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
  1470. {
  1471. if (tg3_flag(tp, ENABLE_ASF)) {
  1472. switch (kind) {
  1473. case RESET_KIND_INIT:
  1474. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  1475. DRV_STATE_START);
  1476. break;
  1477. case RESET_KIND_SHUTDOWN:
  1478. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  1479. DRV_STATE_UNLOAD);
  1480. break;
  1481. case RESET_KIND_SUSPEND:
  1482. tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  1483. DRV_STATE_SUSPEND);
  1484. break;
  1485. default:
  1486. break;
  1487. }
  1488. }
  1489. }
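/* Wait for the on-chip firmware to finish its post-reset initialization.
 * 5906 parts are polled through VCPU_STATUS; everything else is polled via
 * the firmware mailbox handshake.  Missing firmware is reported once but is
 * not treated as an error.
 */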
  1490. static int tg3_poll_fw(struct tg3 *tp)
  1491. {
  1492. int i;
  1493. u32 val;
  1494. if (tg3_flag(tp, IS_SSB_CORE)) {
  1495. /* We don't use firmware. */
  1496. return 0;
  1497. }
  1498. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  1499. /* Wait up to 20ms for init done. */
  1500. for (i = 0; i < 200; i++) {
  1501. if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
  1502. return 0;
  1503. udelay(100);
  1504. }
  1505. return -ENODEV;
  1506. }
  1507. /* Wait for firmware initialization to complete. */
  1508. for (i = 0; i < 100000; i++) {
  1509. tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
  1510. if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
  1511. break;
  1512. udelay(10);
  1513. }
  1514. /* Chip might not be fitted with firmware. Some Sun onboard
  1515. * parts are configured like that. So don't signal the timeout
  1516. * of the above loop as an error, but do report the lack of
  1517. * running firmware once.
  1518. */
  1519. if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
  1520. tg3_flag_set(tp, NO_FWARE_REPORTED);
  1521. netdev_info(tp->dev, "No firmware running\n");
  1522. }
  1523. if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
  1524. /* The 57765 A0 needs a little more
  1525. * time to do some important work.
  1526. */
  1527. mdelay(10);
  1528. }
  1529. return 0;
  1530. }
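/* Log the current link state (speed, duplex, flow control, EEE) and mirror
 * it to the management firmware via tg3_ump_link_report().
 */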
  1531. static void tg3_link_report(struct tg3 *tp)
  1532. {
  1533. if (!netif_carrier_ok(tp->dev)) {
  1534. netif_info(tp, link, tp->dev, "Link is down\n");
  1535. tg3_ump_link_report(tp);
  1536. } else if (netif_msg_link(tp)) {
  1537. netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
  1538. (tp->link_config.active_speed == SPEED_1000 ?
  1539. 1000 :
  1540. (tp->link_config.active_speed == SPEED_100 ?
  1541. 100 : 10)),
  1542. (tp->link_config.active_duplex == DUPLEX_FULL ?
  1543. "full" : "half"));
  1544. netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
  1545. (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
  1546. "on" : "off",
  1547. (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
  1548. "on" : "off");
  1549. if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
  1550. netdev_info(tp->dev, "EEE is %s\n",
  1551. tp->setlpicnt ? "enabled" : "disabled");
  1552. tg3_ump_link_report(tp);
  1553. }
  1554. tp->link_up = netif_carrier_ok(tp->dev);
  1555. }
  1556. static u32 tg3_decode_flowctrl_1000T(u32 adv)
  1557. {
  1558. u32 flowctrl = 0;
  1559. if (adv & ADVERTISE_PAUSE_CAP) {
  1560. flowctrl |= FLOW_CTRL_RX;
  1561. if (!(adv & ADVERTISE_PAUSE_ASYM))
  1562. flowctrl |= FLOW_CTRL_TX;
  1563. } else if (adv & ADVERTISE_PAUSE_ASYM)
  1564. flowctrl |= FLOW_CTRL_TX;
  1565. return flowctrl;
  1566. }
  1567. static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
  1568. {
  1569. u16 miireg;
  1570. if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
  1571. miireg = ADVERTISE_1000XPAUSE;
  1572. else if (flow_ctrl & FLOW_CTRL_TX)
  1573. miireg = ADVERTISE_1000XPSE_ASYM;
  1574. else if (flow_ctrl & FLOW_CTRL_RX)
  1575. miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
  1576. else
  1577. miireg = 0;
  1578. return miireg;
  1579. }
  1580. static u32 tg3_decode_flowctrl_1000X(u32 adv)
  1581. {
  1582. u32 flowctrl = 0;
  1583. if (adv & ADVERTISE_1000XPAUSE) {
  1584. flowctrl |= FLOW_CTRL_RX;
  1585. if (!(adv & ADVERTISE_1000XPSE_ASYM))
  1586. flowctrl |= FLOW_CTRL_TX;
  1587. } else if (adv & ADVERTISE_1000XPSE_ASYM)
  1588. flowctrl |= FLOW_CTRL_TX;
  1589. return flowctrl;
  1590. }
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
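/* Resolve the negotiated pause configuration from the local and remote
 * advertisements (or use the forced settings when autoneg is off) and
 * program MAC_RX_MODE/MAC_TX_MODE accordingly.
 */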
  1604. static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
  1605. {
  1606. u8 autoneg;
  1607. u8 flowctrl = 0;
  1608. u32 old_rx_mode = tp->rx_mode;
  1609. u32 old_tx_mode = tp->tx_mode;
  1610. if (tg3_flag(tp, USE_PHYLIB))
  1611. autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
  1612. else
  1613. autoneg = tp->link_config.autoneg;
  1614. if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
  1615. if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
  1616. flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
  1617. else
  1618. flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
  1619. } else
  1620. flowctrl = tp->link_config.flowctrl;
  1621. tp->link_config.active_flowctrl = flowctrl;
  1622. if (flowctrl & FLOW_CTRL_RX)
  1623. tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
  1624. else
  1625. tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
  1626. if (old_rx_mode != tp->rx_mode)
  1627. tw32_f(MAC_RX_MODE, tp->rx_mode);
  1628. if (flowctrl & FLOW_CTRL_TX)
  1629. tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
  1630. else
  1631. tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
  1632. if (old_tx_mode != tp->tx_mode)
  1633. tw32_f(MAC_TX_MODE, tp->tx_mode);
  1634. }
  1635. static void tg3_adjust_link(struct net_device *dev)
  1636. {
  1637. u8 oldflowctrl, linkmesg = 0;
  1638. u32 mac_mode, lcl_adv, rmt_adv;
  1639. struct tg3 *tp = netdev_priv(dev);
  1640. struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  1641. spin_lock_bh(&tp->lock);
  1642. mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
  1643. MAC_MODE_HALF_DUPLEX);
  1644. oldflowctrl = tp->link_config.active_flowctrl;
  1645. if (phydev->link) {
  1646. lcl_adv = 0;
  1647. rmt_adv = 0;
  1648. if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
  1649. mac_mode |= MAC_MODE_PORT_MODE_MII;
  1650. else if (phydev->speed == SPEED_1000 ||
  1651. tg3_asic_rev(tp) != ASIC_REV_5785)
  1652. mac_mode |= MAC_MODE_PORT_MODE_GMII;
  1653. else
  1654. mac_mode |= MAC_MODE_PORT_MODE_MII;
  1655. if (phydev->duplex == DUPLEX_HALF)
  1656. mac_mode |= MAC_MODE_HALF_DUPLEX;
  1657. else {
  1658. lcl_adv = mii_advertise_flowctrl(
  1659. tp->link_config.flowctrl);
  1660. if (phydev->pause)
  1661. rmt_adv = LPA_PAUSE_CAP;
  1662. if (phydev->asym_pause)
  1663. rmt_adv |= LPA_PAUSE_ASYM;
  1664. }
  1665. tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
  1666. } else
  1667. mac_mode |= MAC_MODE_PORT_MODE_GMII;
  1668. if (mac_mode != tp->mac_mode) {
  1669. tp->mac_mode = mac_mode;
  1670. tw32_f(MAC_MODE, tp->mac_mode);
  1671. udelay(40);
  1672. }
  1673. if (tg3_asic_rev(tp) == ASIC_REV_5785) {
  1674. if (phydev->speed == SPEED_10)
  1675. tw32(MAC_MI_STAT,
  1676. MAC_MI_STAT_10MBPS_MODE |
  1677. MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
  1678. else
  1679. tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
  1680. }
  1681. if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
  1682. tw32(MAC_TX_LENGTHS,
  1683. ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  1684. (6 << TX_LENGTHS_IPG_SHIFT) |
  1685. (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
  1686. else
  1687. tw32(MAC_TX_LENGTHS,
  1688. ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  1689. (6 << TX_LENGTHS_IPG_SHIFT) |
  1690. (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
  1691. if (phydev->link != tp->old_link ||
  1692. phydev->speed != tp->link_config.active_speed ||
  1693. phydev->duplex != tp->link_config.active_duplex ||
  1694. oldflowctrl != tp->link_config.active_flowctrl)
  1695. linkmesg = 1;
  1696. tp->old_link = phydev->link;
  1697. tp->link_config.active_speed = phydev->speed;
  1698. tp->link_config.active_duplex = phydev->duplex;
  1699. spin_unlock_bh(&tp->lock);
  1700. if (linkmesg)
  1701. tg3_link_report(tp);
  1702. }
  1703. static int tg3_phy_init(struct tg3 *tp)
  1704. {
  1705. struct phy_device *phydev;
  1706. if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
  1707. return 0;
  1708. /* Bring the PHY back to a known state. */
  1709. tg3_bmcr_reset(tp);
  1710. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  1711. /* Attach the MAC to the PHY. */
  1712. phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
  1713. tg3_adjust_link, phydev->interface);
  1714. if (IS_ERR(phydev)) {
  1715. dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
  1716. return PTR_ERR(phydev);
  1717. }
  1718. /* Mask with MAC supported features. */
  1719. switch (phydev->interface) {
  1720. case PHY_INTERFACE_MODE_GMII:
  1721. case PHY_INTERFACE_MODE_RGMII:
  1722. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
  1723. phydev->supported &= (PHY_GBIT_FEATURES |
  1724. SUPPORTED_Pause |
  1725. SUPPORTED_Asym_Pause);
  1726. break;
  1727. }
  1728. /* fallthru */
  1729. case PHY_INTERFACE_MODE_MII:
  1730. phydev->supported &= (PHY_BASIC_FEATURES |
  1731. SUPPORTED_Pause |
  1732. SUPPORTED_Asym_Pause);
  1733. break;
  1734. default:
  1735. phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
  1736. return -EINVAL;
  1737. }
  1738. tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
  1739. phydev->advertising = phydev->supported;
  1740. return 0;
  1741. }
  1742. static void tg3_phy_start(struct tg3 *tp)
  1743. {
  1744. struct phy_device *phydev;
  1745. if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
  1746. return;
  1747. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  1748. if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
  1749. tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
  1750. phydev->speed = tp->link_config.speed;
  1751. phydev->duplex = tp->link_config.duplex;
  1752. phydev->autoneg = tp->link_config.autoneg;
  1753. phydev->advertising = tp->link_config.advertising;
  1754. }
  1755. phy_start(phydev);
  1756. phy_start_aneg(phydev);
  1757. }
  1758. static void tg3_phy_stop(struct tg3 *tp)
  1759. {
  1760. if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
  1761. return;
  1762. phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
  1763. }
  1764. static void tg3_phy_fini(struct tg3 *tp)
  1765. {
  1766. if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
  1767. phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
  1768. tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
  1769. }
  1770. }
  1771. static int tg3_phy_set_extloopbk(struct tg3 *tp)
  1772. {
  1773. int err;
  1774. u32 val;
  1775. if (tp->phy_flags & TG3_PHYFLG_IS_FET)
  1776. return 0;
  1777. if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
  1778. /* Cannot do read-modify-write on 5401 */
  1779. err = tg3_phy_auxctl_write(tp,
  1780. MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
  1781. MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
  1782. 0x4c20);
  1783. goto done;
  1784. }
  1785. err = tg3_phy_auxctl_read(tp,
  1786. MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
  1787. if (err)
  1788. return err;
  1789. val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
  1790. err = tg3_phy_auxctl_write(tp,
  1791. MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
  1792. done:
  1793. return err;
  1794. }
  1795. static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
  1796. {
  1797. u32 phytest;
  1798. if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
  1799. u32 phy;
  1800. tg3_writephy(tp, MII_TG3_FET_TEST,
  1801. phytest | MII_TG3_FET_SHADOW_EN);
  1802. if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
  1803. if (enable)
  1804. phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
  1805. else
  1806. phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
  1807. tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
  1808. }
  1809. tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
  1810. }
  1811. }
  1812. static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
  1813. {
  1814. u32 reg;
  1815. if (!tg3_flag(tp, 5705_PLUS) ||
  1816. (tg3_flag(tp, 5717_PLUS) &&
  1817. (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
  1818. return;
  1819. if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
  1820. tg3_phy_fet_toggle_apd(tp, enable);
  1821. return;
  1822. }
  1823. reg = MII_TG3_MISC_SHDW_WREN |
  1824. MII_TG3_MISC_SHDW_SCR5_SEL |
  1825. MII_TG3_MISC_SHDW_SCR5_LPED |
  1826. MII_TG3_MISC_SHDW_SCR5_DLPTLM |
  1827. MII_TG3_MISC_SHDW_SCR5_SDTL |
  1828. MII_TG3_MISC_SHDW_SCR5_C125OE;
  1829. if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
  1830. reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
  1831. tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
  1832. reg = MII_TG3_MISC_SHDW_WREN |
  1833. MII_TG3_MISC_SHDW_APD_SEL |
  1834. MII_TG3_MISC_SHDW_APD_WKTM_84MS;
  1835. if (enable)
  1836. reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
  1837. tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
  1838. }
  1839. static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
  1840. {
  1841. u32 phy;
  1842. if (!tg3_flag(tp, 5705_PLUS) ||
  1843. (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  1844. return;
  1845. if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
  1846. u32 ephy;
  1847. if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
  1848. u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
  1849. tg3_writephy(tp, MII_TG3_FET_TEST,
  1850. ephy | MII_TG3_FET_SHADOW_EN);
  1851. if (!tg3_readphy(tp, reg, &phy)) {
  1852. if (enable)
  1853. phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
  1854. else
  1855. phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
  1856. tg3_writephy(tp, reg, phy);
  1857. }
  1858. tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
  1859. }
  1860. } else {
  1861. int ret;
  1862. ret = tg3_phy_auxctl_read(tp,
  1863. MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
  1864. if (!ret) {
  1865. if (enable)
  1866. phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
  1867. else
  1868. phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
  1869. tg3_phy_auxctl_write(tp,
  1870. MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
  1871. }
  1872. }
  1873. }
  1874. static void tg3_phy_set_wirespeed(struct tg3 *tp)
  1875. {
  1876. int ret;
  1877. u32 val;
  1878. if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
  1879. return;
  1880. ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
  1881. if (!ret)
  1882. tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
  1883. val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
  1884. }
  1885. static void tg3_phy_apply_otp(struct tg3 *tp)
  1886. {
  1887. u32 otp, phy;
  1888. if (!tp->phy_otp)
  1889. return;
  1890. otp = tp->phy_otp;
  1891. if (tg3_phy_toggle_auxctl_smdsp(tp, true))
  1892. return;
  1893. phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
  1894. phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
  1895. tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
  1896. phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
  1897. ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
  1898. tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
  1899. phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
  1900. phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
  1901. tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
  1902. phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
  1903. tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
  1904. phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
  1905. tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
  1906. phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
  1907. ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
  1908. tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
  1909. tg3_phy_toggle_auxctl_smdsp(tp, false);
  1910. }
  1911. static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
  1912. {
  1913. u32 val;
  1914. if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
  1915. return;
  1916. tp->setlpicnt = 0;
  1917. if (tp->link_config.autoneg == AUTONEG_ENABLE &&
  1918. current_link_up == 1 &&
  1919. tp->link_config.active_duplex == DUPLEX_FULL &&
  1920. (tp->link_config.active_speed == SPEED_100 ||
  1921. tp->link_config.active_speed == SPEED_1000)) {
  1922. u32 eeectl;
  1923. if (tp->link_config.active_speed == SPEED_1000)
  1924. eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
  1925. else
  1926. eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
  1927. tw32(TG3_CPMU_EEE_CTRL, eeectl);
  1928. tg3_phy_cl45_read(tp, MDIO_MMD_AN,
  1929. TG3_CL45_D7_EEERES_STAT, &val);
  1930. if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
  1931. val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
  1932. tp->setlpicnt = 2;
  1933. }
  1934. if (!tp->setlpicnt) {
  1935. if (current_link_up == 1 &&
  1936. !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
  1937. tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
  1938. tg3_phy_toggle_auxctl_smdsp(tp, false);
  1939. }
  1940. val = tr32(TG3_CPMU_EEE_MODE);
  1941. tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
  1942. }
  1943. }
  1944. static void tg3_phy_eee_enable(struct tg3 *tp)
  1945. {
  1946. u32 val;
  1947. if (tp->link_config.active_speed == SPEED_1000 &&
  1948. (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  1949. tg3_asic_rev(tp) == ASIC_REV_5719 ||
  1950. tg3_flag(tp, 57765_CLASS)) &&
  1951. !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
  1952. val = MII_TG3_DSP_TAP26_ALNOKO |
  1953. MII_TG3_DSP_TAP26_RMRXSTO;
  1954. tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
  1955. tg3_phy_toggle_auxctl_smdsp(tp, false);
  1956. }
  1957. val = tr32(TG3_CPMU_EEE_MODE);
  1958. tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
  1959. }
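/* Poll MII_TG3_DSP_CONTROL until the DSP macro busy bit (0x1000) clears;
 * returns -EBUSY if it never does within the retry budget.
 */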
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
  1974. static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
  1975. {
  1976. static const u32 test_pat[4][6] = {
  1977. { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
  1978. { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
  1979. { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
  1980. { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
  1981. };
  1982. int chan;
  1983. for (chan = 0; chan < 4; chan++) {
  1984. int i;
  1985. tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
  1986. (chan * 0x2000) | 0x0200);
  1987. tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
  1988. for (i = 0; i < 6; i++)
  1989. tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
  1990. test_pat[chan][i]);
  1991. tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
  1992. if (tg3_wait_macro_done(tp)) {
  1993. *resetp = 1;
  1994. return -EBUSY;
  1995. }
  1996. tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
  1997. (chan * 0x2000) | 0x0200);
  1998. tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
  1999. if (tg3_wait_macro_done(tp)) {
  2000. *resetp = 1;
  2001. return -EBUSY;
  2002. }
  2003. tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
  2004. if (tg3_wait_macro_done(tp)) {
  2005. *resetp = 1;
  2006. return -EBUSY;
  2007. }
  2008. for (i = 0; i < 6; i += 2) {
  2009. u32 low, high;
  2010. if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
  2011. tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
  2012. tg3_wait_macro_done(tp)) {
  2013. *resetp = 1;
  2014. return -EBUSY;
  2015. }
  2016. low &= 0x7fff;
  2017. high &= 0x000f;
  2018. if (low != test_pat[chan][i] ||
  2019. high != test_pat[chan][i+1]) {
  2020. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
  2021. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
  2022. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
  2023. return -EBUSY;
  2024. }
  2025. }
  2026. }
  2027. return 0;
  2028. }
  2029. static int tg3_phy_reset_chanpat(struct tg3 *tp)
  2030. {
  2031. int chan;
  2032. for (chan = 0; chan < 4; chan++) {
  2033. int i;
  2034. tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
  2035. (chan * 0x2000) | 0x0200);
  2036. tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
  2037. for (i = 0; i < 6; i++)
  2038. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
  2039. tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
  2040. if (tg3_wait_macro_done(tp))
  2041. return -EBUSY;
  2042. }
  2043. return 0;
  2044. }
  2045. static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
  2046. {
  2047. u32 reg32, phy9_orig;
  2048. int retries, do_phy_reset, err;
  2049. retries = 10;
  2050. do_phy_reset = 1;
  2051. do {
  2052. if (do_phy_reset) {
  2053. err = tg3_bmcr_reset(tp);
  2054. if (err)
  2055. return err;
  2056. do_phy_reset = 0;
  2057. }
  2058. /* Disable transmitter and interrupt. */
  2059. if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
  2060. continue;
  2061. reg32 |= 0x3000;
  2062. tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
  2063. /* Set full-duplex, 1000 mbps. */
  2064. tg3_writephy(tp, MII_BMCR,
  2065. BMCR_FULLDPLX | BMCR_SPEED1000);
  2066. /* Set to master mode. */
  2067. if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
  2068. continue;
  2069. tg3_writephy(tp, MII_CTRL1000,
  2070. CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
  2071. err = tg3_phy_toggle_auxctl_smdsp(tp, true);
  2072. if (err)
  2073. return err;
  2074. /* Block the PHY control access. */
  2075. tg3_phydsp_write(tp, 0x8005, 0x0800);
  2076. err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
  2077. if (!err)
  2078. break;
  2079. } while (--retries);
  2080. err = tg3_phy_reset_chanpat(tp);
  2081. if (err)
  2082. return err;
  2083. tg3_phydsp_write(tp, 0x8005, 0x0000);
  2084. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
  2085. tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
  2086. tg3_phy_toggle_auxctl_smdsp(tp, false);
  2087. tg3_writephy(tp, MII_CTRL1000, phy9_orig);
  2088. if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
  2089. reg32 &= ~0x3000;
  2090. tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
  2091. } else if (!err)
  2092. err = -EBUSY;
  2093. return err;
  2094. }
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}
/* Unconditionally reset the tigon3 PHY and reapply the chip-specific
 * workarounds the attached PHY requires.
 */
  2109. static int tg3_phy_reset(struct tg3 *tp)
  2110. {
  2111. u32 val, cpmuctrl;
  2112. int err;
  2113. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  2114. val = tr32(GRC_MISC_CFG);
  2115. tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
  2116. udelay(40);
  2117. }
  2118. err = tg3_readphy(tp, MII_BMSR, &val);
  2119. err |= tg3_readphy(tp, MII_BMSR, &val);
  2120. if (err != 0)
  2121. return -EBUSY;
  2122. if (netif_running(tp->dev) && tp->link_up) {
  2123. netif_carrier_off(tp->dev);
  2124. tg3_link_report(tp);
  2125. }
  2126. if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
  2127. tg3_asic_rev(tp) == ASIC_REV_5704 ||
  2128. tg3_asic_rev(tp) == ASIC_REV_5705) {
  2129. err = tg3_phy_reset_5703_4_5(tp);
  2130. if (err)
  2131. return err;
  2132. goto out;
  2133. }
  2134. cpmuctrl = 0;
  2135. if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
  2136. tg3_chip_rev(tp) != CHIPREV_5784_AX) {
  2137. cpmuctrl = tr32(TG3_CPMU_CTRL);
  2138. if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
  2139. tw32(TG3_CPMU_CTRL,
  2140. cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
  2141. }
  2142. err = tg3_bmcr_reset(tp);
  2143. if (err)
  2144. return err;
  2145. if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
  2146. val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
  2147. tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
  2148. tw32(TG3_CPMU_CTRL, cpmuctrl);
  2149. }
  2150. if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
  2151. tg3_chip_rev(tp) == CHIPREV_5761_AX) {
  2152. val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
  2153. if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
  2154. CPMU_LSPD_1000MB_MACCLK_12_5) {
  2155. val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
  2156. udelay(40);
  2157. tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
  2158. }
  2159. }
  2160. if (tg3_flag(tp, 5717_PLUS) &&
  2161. (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
  2162. return 0;
  2163. tg3_phy_apply_otp(tp);
  2164. if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
  2165. tg3_phy_toggle_apd(tp, true);
  2166. else
  2167. tg3_phy_toggle_apd(tp, false);
  2168. out:
  2169. if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
  2170. !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
  2171. tg3_phydsp_write(tp, 0x201f, 0x2aaa);
  2172. tg3_phydsp_write(tp, 0x000a, 0x0323);
  2173. tg3_phy_toggle_auxctl_smdsp(tp, false);
  2174. }
  2175. if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
  2176. tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
  2177. tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
  2178. }
  2179. if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
  2180. if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
  2181. tg3_phydsp_write(tp, 0x000a, 0x310b);
  2182. tg3_phydsp_write(tp, 0x201f, 0x9506);
  2183. tg3_phydsp_write(tp, 0x401f, 0x14e2);
  2184. tg3_phy_toggle_auxctl_smdsp(tp, false);
  2185. }
  2186. } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
  2187. if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
  2188. tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
  2189. if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
  2190. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
  2191. tg3_writephy(tp, MII_TG3_TEST1,
  2192. MII_TG3_TEST1_TRIM_EN | 0x4);
  2193. } else
  2194. tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
  2195. tg3_phy_toggle_auxctl_smdsp(tp, false);
  2196. }
  2197. }
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
  2200. if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
  2201. /* Cannot do read-modify-write on 5401 */
  2202. tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
  2203. } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
  2204. /* Set bit 14 with read-modify-write to preserve other bits */
  2205. err = tg3_phy_auxctl_read(tp,
  2206. MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
  2207. if (!err)
  2208. tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
  2209. val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
  2210. }
  2211. /* Set phy register 0x10 bit 0 to high fifo elasticity to support
  2212. * jumbo frames transmission.
  2213. */
  2214. if (tg3_flag(tp, JUMBO_CAPABLE)) {
  2215. if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
  2216. tg3_writephy(tp, MII_TG3_EXT_CTRL,
  2217. val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
  2218. }
  2219. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  2220. /* adjust output voltage */
  2221. tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
  2222. }
  2223. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
  2224. tg3_phydsp_write(tp, 0xffb, 0x4000);
  2225. tg3_phy_toggle_automdix(tp, 1);
  2226. tg3_phy_set_wirespeed(tp);
  2227. return 0;
  2228. }
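/* Power source handshaking for multi-function devices: each PCI function
 * publishes a two-bit status (driver present, needs Vaux) in a per-function
 * nibble of the shared GPIO message word defined below.
 */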
  2229. #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
  2230. #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
  2231. #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
  2232. TG3_GPIO_MSG_NEED_VAUX)
  2233. #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
  2234. ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
  2235. (TG3_GPIO_MSG_DRVR_PRES << 4) | \
  2236. (TG3_GPIO_MSG_DRVR_PRES << 8) | \
  2237. (TG3_GPIO_MSG_DRVR_PRES << 12))
  2238. #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
  2239. ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
  2240. (TG3_GPIO_MSG_NEED_VAUX << 4) | \
  2241. (TG3_GPIO_MSG_NEED_VAUX << 8) | \
  2242. (TG3_GPIO_MSG_NEED_VAUX << 12))
  2243. static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
  2244. {
  2245. u32 status, shift;
  2246. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  2247. tg3_asic_rev(tp) == ASIC_REV_5719)
  2248. status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
  2249. else
  2250. status = tr32(TG3_CPMU_DRV_STATUS);
  2251. shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
  2252. status &= ~(TG3_GPIO_MSG_MASK << shift);
  2253. status |= (newstat << shift);
  2254. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  2255. tg3_asic_rev(tp) == ASIC_REV_5719)
  2256. tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
  2257. else
  2258. tw32(TG3_CPMU_DRV_STATUS, status);
  2259. return status >> TG3_APE_GPIO_MSG_SHIFT;
  2260. }
  2261. static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
  2262. {
  2263. if (!tg3_flag(tp, IS_NIC))
  2264. return 0;
  2265. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  2266. tg3_asic_rev(tp) == ASIC_REV_5719 ||
  2267. tg3_asic_rev(tp) == ASIC_REV_5720) {
  2268. if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
  2269. return -EIO;
  2270. tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
  2271. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
  2272. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2273. tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
  2274. } else {
  2275. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
  2276. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2277. }
  2278. return 0;
  2279. }
  2280. static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
  2281. {
  2282. u32 grc_local_ctrl;
  2283. if (!tg3_flag(tp, IS_NIC) ||
  2284. tg3_asic_rev(tp) == ASIC_REV_5700 ||
  2285. tg3_asic_rev(tp) == ASIC_REV_5701)
  2286. return;
  2287. grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
  2288. tw32_wait_f(GRC_LOCAL_CTRL,
  2289. grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
  2290. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2291. tw32_wait_f(GRC_LOCAL_CTRL,
  2292. grc_local_ctrl,
  2293. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2294. tw32_wait_f(GRC_LOCAL_CTRL,
  2295. grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
  2296. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2297. }
  2298. static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
  2299. {
  2300. if (!tg3_flag(tp, IS_NIC))
  2301. return;
  2302. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  2303. tg3_asic_rev(tp) == ASIC_REV_5701) {
  2304. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  2305. (GRC_LCLCTRL_GPIO_OE0 |
  2306. GRC_LCLCTRL_GPIO_OE1 |
  2307. GRC_LCLCTRL_GPIO_OE2 |
  2308. GRC_LCLCTRL_GPIO_OUTPUT0 |
  2309. GRC_LCLCTRL_GPIO_OUTPUT1),
  2310. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2311. } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
  2312. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
  2313. /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
  2314. u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
  2315. GRC_LCLCTRL_GPIO_OE1 |
  2316. GRC_LCLCTRL_GPIO_OE2 |
  2317. GRC_LCLCTRL_GPIO_OUTPUT0 |
  2318. GRC_LCLCTRL_GPIO_OUTPUT1 |
  2319. tp->grc_local_ctrl;
  2320. tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
  2321. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2322. grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
  2323. tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
  2324. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2325. grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
  2326. tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
  2327. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2328. } else {
  2329. u32 no_gpio2;
  2330. u32 grc_local_ctrl = 0;
  2331. /* Workaround to prevent overdrawing Amps. */
  2332. if (tg3_asic_rev(tp) == ASIC_REV_5714) {
  2333. grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
  2334. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
  2335. grc_local_ctrl,
  2336. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2337. }
  2338. /* On 5753 and variants, GPIO2 cannot be used. */
  2339. no_gpio2 = tp->nic_sram_data_cfg &
  2340. NIC_SRAM_DATA_CFG_NO_GPIO2;
  2341. grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
  2342. GRC_LCLCTRL_GPIO_OE1 |
  2343. GRC_LCLCTRL_GPIO_OE2 |
  2344. GRC_LCLCTRL_GPIO_OUTPUT1 |
  2345. GRC_LCLCTRL_GPIO_OUTPUT2;
  2346. if (no_gpio2) {
  2347. grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
  2348. GRC_LCLCTRL_GPIO_OUTPUT2);
  2349. }
  2350. tw32_wait_f(GRC_LOCAL_CTRL,
  2351. tp->grc_local_ctrl | grc_local_ctrl,
  2352. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2353. grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
  2354. tw32_wait_f(GRC_LOCAL_CTRL,
  2355. tp->grc_local_ctrl | grc_local_ctrl,
  2356. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2357. if (!no_gpio2) {
  2358. grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
  2359. tw32_wait_f(GRC_LOCAL_CTRL,
  2360. tp->grc_local_ctrl | grc_local_ctrl,
  2361. TG3_GRC_LCLCTL_PWRSW_DELAY);
  2362. }
  2363. }
  2364. }
  2365. static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
  2366. {
  2367. u32 msg = 0;
  2368. /* Serialize power state transitions */
  2369. if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
  2370. return;
  2371. if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
  2372. msg = TG3_GPIO_MSG_NEED_VAUX;
  2373. msg = tg3_set_function_status(tp, msg);
  2374. if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
  2375. goto done;
  2376. if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
  2377. tg3_pwrsrc_switch_to_vaux(tp);
  2378. else
  2379. tg3_pwrsrc_die_with_vmain(tp);
  2380. done:
  2381. tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
  2382. }
  2383. static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
  2384. {
  2385. bool need_vaux = false;
  2386. /* The GPIOs do something completely different on 57765. */
  2387. if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
  2388. return;
  2389. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  2390. tg3_asic_rev(tp) == ASIC_REV_5719 ||
  2391. tg3_asic_rev(tp) == ASIC_REV_5720) {
  2392. tg3_frob_aux_power_5717(tp, include_wol ?
  2393. tg3_flag(tp, WOL_ENABLE) != 0 : 0);
  2394. return;
  2395. }
  2396. if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
  2397. struct net_device *dev_peer;
  2398. dev_peer = pci_get_drvdata(tp->pdev_peer);
  2399. /* remove_one() may have been run on the peer. */
  2400. if (dev_peer) {
  2401. struct tg3 *tp_peer = netdev_priv(dev_peer);
  2402. if (tg3_flag(tp_peer, INIT_COMPLETE))
  2403. return;
  2404. if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
  2405. tg3_flag(tp_peer, ENABLE_ASF))
  2406. need_vaux = true;
  2407. }
  2408. }
  2409. if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
  2410. tg3_flag(tp, ENABLE_ASF))
  2411. need_vaux = true;
  2412. if (need_vaux)
  2413. tg3_pwrsrc_switch_to_vaux(tp);
  2414. else
  2415. tg3_pwrsrc_die_with_vmain(tp);
  2416. }
  2417. static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
  2418. {
  2419. if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
  2420. return 1;
  2421. else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
  2422. if (speed != SPEED_10)
  2423. return 1;
  2424. } else if (speed == SPEED_10)
  2425. return 1;
  2426. return 0;
  2427. }
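/* Put the PHY into the lowest-power state compatible with the chip's errata
 * and the configured wake-up requirements.  SerDes, 5906 and FET-style PHYs
 * each need their own sequence; other PHYs end with a BMCR power-down unless
 * the chip is on the do-not-power-down list.
 */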
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
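/* NVRAM software arbitration.  tg3_nvram_lock()/tg3_nvram_unlock() nest via
 * nvram_lock_cnt and use the NVRAM_SWARB register to arbitrate access with
 * the on-chip firmware.
 */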
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
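/* Read a single 32-bit word through the legacy SEEPROM interface
 * (GRC_EEPROM_ADDR/GRC_EEPROM_DATA), used when no NVRAM flash is present.
 */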
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
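/* Translate between linear NVRAM offsets and the page-based addressing used
 * by buffered Atmel AT45DB0X1B parts.
 */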
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))
		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))
		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
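/* Write a block to the legacy SEEPROM one 32-bit word at a time, byteswapping
 * so that the data ends up exactly as tg3_nvram_read_be32() would return it.
 */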
  2650. static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
  2651. u32 offset, u32 len, u8 *buf)
  2652. {
  2653. int i, j, rc = 0;
  2654. u32 val;
  2655. for (i = 0; i < len; i += 4) {
  2656. u32 addr;
  2657. __be32 data;
  2658. addr = offset + i;
  2659. memcpy(&data, buf + i, 4);
  2660. /*
  2661. * The SEEPROM interface expects the data to always be opposite
  2662. * the native endian format. We accomplish this by reversing
  2663. * all the operations that would have been performed on the
  2664. * data from a call to tg3_nvram_read_be32().
  2665. */
  2666. tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
  2667. val = tr32(GRC_EEPROM_ADDR);
  2668. tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
  2669. val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
  2670. EEPROM_ADDR_READ);
  2671. tw32(GRC_EEPROM_ADDR, val |
  2672. (0 << EEPROM_ADDR_DEVID_SHIFT) |
  2673. (addr & EEPROM_ADDR_ADDR_MASK) |
  2674. EEPROM_ADDR_START |
  2675. EEPROM_ADDR_WRITE);
  2676. for (j = 0; j < 1000; j++) {
  2677. val = tr32(GRC_EEPROM_ADDR);
  2678. if (val & EEPROM_ADDR_COMPLETE)
  2679. break;
  2680. msleep(1);
  2681. }
  2682. if (!(val & EEPROM_ADDR_COMPLETE)) {
  2683. rc = -EBUSY;
  2684. break;
  2685. }
  2686. }
  2687. return rc;
  2688. }
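/* Unbuffered flash write: read-modify-write a full page at a time, issuing
 * write-enable and page-erase commands before reprogramming each page.
 */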
  2689. /* offset and length are dword aligned */
  2690. static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
  2691. u8 *buf)
  2692. {
  2693. int ret = 0;
  2694. u32 pagesize = tp->nvram_pagesize;
  2695. u32 pagemask = pagesize - 1;
  2696. u32 nvram_cmd;
  2697. u8 *tmp;
  2698. tmp = kmalloc(pagesize, GFP_KERNEL);
  2699. if (tmp == NULL)
  2700. return -ENOMEM;
  2701. while (len) {
  2702. int j;
  2703. u32 phy_addr, page_off, size;
  2704. phy_addr = offset & ~pagemask;
  2705. for (j = 0; j < pagesize; j += 4) {
  2706. ret = tg3_nvram_read_be32(tp, phy_addr + j,
  2707. (__be32 *) (tmp + j));
  2708. if (ret)
  2709. break;
  2710. }
  2711. if (ret)
  2712. break;
  2713. page_off = offset & pagemask;
  2714. size = pagesize;
  2715. if (len < size)
  2716. size = len;
  2717. len -= size;
  2718. memcpy(tmp + page_off, buf, size);
  2719. offset = offset + (pagesize - page_off);
  2720. tg3_enable_nvram_access(tp);
  2721. /*
  2722. * Before we can erase the flash page, we need
  2723. * to issue a special "write enable" command.
  2724. */
  2725. nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
  2726. if (tg3_nvram_exec_cmd(tp, nvram_cmd))
  2727. break;
  2728. /* Erase the target page */
  2729. tw32(NVRAM_ADDR, phy_addr);
  2730. nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
  2731. NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
  2732. if (tg3_nvram_exec_cmd(tp, nvram_cmd))
  2733. break;
  2734. /* Issue another write enable to start the write. */
  2735. nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
  2736. if (tg3_nvram_exec_cmd(tp, nvram_cmd))
  2737. break;
  2738. for (j = 0; j < pagesize; j += 4) {
  2739. __be32 data;
  2740. data = *((__be32 *) (tmp + j));
  2741. tw32(NVRAM_WRDATA, be32_to_cpu(data));
  2742. tw32(NVRAM_ADDR, phy_addr + j);
  2743. nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
  2744. NVRAM_CMD_WR;
  2745. if (j == 0)
  2746. nvram_cmd |= NVRAM_CMD_FIRST;
  2747. else if (j == (pagesize - 4))
  2748. nvram_cmd |= NVRAM_CMD_LAST;
  2749. ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
  2750. if (ret)
  2751. break;
  2752. }
  2753. if (ret)
  2754. break;
  2755. }
  2756. nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
  2757. tg3_nvram_exec_cmd(tp, nvram_cmd);
  2758. kfree(tmp);
  2759. return ret;
  2760. }
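/* Buffered flash / EEPROM write: program one 32-bit word per NVRAM command,
 * tagging the first and last words of each page with FIRST/LAST.
 */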
  2761. /* offset and length are dword aligned */
  2762. static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
  2763. u8 *buf)
  2764. {
  2765. int i, ret = 0;
  2766. for (i = 0; i < len; i += 4, offset += 4) {
  2767. u32 page_off, phy_addr, nvram_cmd;
  2768. __be32 data;
  2769. memcpy(&data, buf + i, 4);
  2770. tw32(NVRAM_WRDATA, be32_to_cpu(data));
  2771. page_off = offset % tp->nvram_pagesize;
  2772. phy_addr = tg3_nvram_phys_addr(tp, offset);
  2773. nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
  2774. if (page_off == 0 || i == 0)
  2775. nvram_cmd |= NVRAM_CMD_FIRST;
  2776. if (page_off == (tp->nvram_pagesize - 4))
  2777. nvram_cmd |= NVRAM_CMD_LAST;
  2778. if (i == (len - 4))
  2779. nvram_cmd |= NVRAM_CMD_LAST;
  2780. if ((nvram_cmd & NVRAM_CMD_FIRST) ||
  2781. !tg3_flag(tp, FLASH) ||
  2782. !tg3_flag(tp, 57765_PLUS))
  2783. tw32(NVRAM_ADDR, phy_addr);
  2784. if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
  2785. !tg3_flag(tp, 5755_PLUS) &&
  2786. (tp->nvram_jedecnum == JEDEC_ST) &&
  2787. (nvram_cmd & NVRAM_CMD_FIRST)) {
  2788. u32 cmd;
  2789. cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
  2790. ret = tg3_nvram_exec_cmd(tp, cmd);
  2791. if (ret)
  2792. break;
  2793. }
  2794. if (!tg3_flag(tp, FLASH)) {
  2795. /* We always do complete word writes to eeprom. */
  2796. nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
  2797. }
  2798. ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
  2799. if (ret)
  2800. break;
  2801. }
  2802. return ret;
  2803. }
  2804. /* offset and length are dword aligned */
  2805. static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
  2806. {
  2807. int ret;
  2808. if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
  2809. tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
  2810. ~GRC_LCLCTRL_GPIO_OUTPUT1);
  2811. udelay(40);
  2812. }
  2813. if (!tg3_flag(tp, NVRAM)) {
  2814. ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
  2815. } else {
  2816. u32 grc_mode;
  2817. ret = tg3_nvram_lock(tp);
  2818. if (ret)
  2819. return ret;
  2820. tg3_enable_nvram_access(tp);
  2821. if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
  2822. tw32(NVRAM_WRITE1, 0x406);
  2823. grc_mode = tr32(GRC_MODE);
  2824. tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
  2825. if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
  2826. ret = tg3_nvram_write_block_buffered(tp, offset, len,
  2827. buf);
  2828. } else {
  2829. ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
  2830. buf);
  2831. }
  2832. grc_mode = tr32(GRC_MODE);
  2833. tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
  2834. tg3_disable_nvram_access(tp);
  2835. tg3_nvram_unlock(tp);
  2836. }
  2837. if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
  2838. tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
  2839. udelay(40);
  2840. }
  2841. return ret;
  2842. }
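/* On-chip scratch memory windows used when loading firmware into the RX and
 * TX CPUs.
 */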
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non-fragmented firmware has one firmware header followed by a
	 * contiguous chunk of data to be written.  The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss.  The data length is derived from tp->fw->size
	 * minus the headers.
	 *
	 * Fragmented firmware has a main header followed by multiple
	 * fragments.  Each fragment is identical to non-fragmented firmware:
	 * a firmware header followed by a contiguous chunk of data.  In the
	 * main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment, i.e. fragment data plus header length.  The data length
	 * is therefore the header's length field minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
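/* Copy a firmware image (possibly fragmented) into CPU scratch memory.  On
 * everything except the 57766 the target CPU is halted and its scratch area
 * cleared first.
 */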
  2940. /* tp->lock is held. */
  2941. static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
  2942. u32 cpu_scratch_base, int cpu_scratch_size,
  2943. const struct tg3_firmware_hdr *fw_hdr)
  2944. {
  2945. int err, i;
  2946. void (*write_op)(struct tg3 *, u32, u32);
  2947. int total_len = tp->fw->size;
  2948. if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
  2949. netdev_err(tp->dev,
  2950. "%s: Trying to load TX cpu firmware which is 5705\n",
  2951. __func__);
  2952. return -EINVAL;
  2953. }
  2954. if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
  2955. write_op = tg3_write_mem;
  2956. else
  2957. write_op = tg3_write_indirect_reg32;
  2958. if (tg3_asic_rev(tp) != ASIC_REV_57766) {
  2959. /* It is possible that bootcode is still loading at this point.
  2960. * Get the nvram lock first before halting the cpu.
  2961. */
  2962. int lock_err = tg3_nvram_lock(tp);
  2963. err = tg3_halt_cpu(tp, cpu_base);
  2964. if (!lock_err)
  2965. tg3_nvram_unlock(tp);
  2966. if (err)
  2967. goto out;
  2968. for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
  2969. write_op(tp, cpu_scratch_base + i, 0);
  2970. tw32(cpu_base + CPU_STATE, 0xffffffff);
  2971. tw32(cpu_base + CPU_MODE,
  2972. tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
  2973. } else {
  2974. /* Subtract additional main header for fragmented firmware and
  2975. * advance to the first fragment
  2976. */
  2977. total_len -= TG3_FW_HDR_LEN;
  2978. fw_hdr++;
  2979. }
  2980. do {
  2981. u32 *fw_data = (u32 *)(fw_hdr + 1);
  2982. for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
  2983. write_op(tp, cpu_scratch_base +
  2984. (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
  2985. (i * sizeof(u32)),
  2986. be32_to_cpu(fw_data[i]));
  2987. total_len -= be32_to_cpu(fw_hdr->len);
  2988. /* Advance to next fragment */
  2989. fw_hdr = (struct tg3_firmware_hdr *)
  2990. ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
  2991. } while (total_len > 0);
  2992. err = 0;
  2993. out:
  2994. return err;
  2995. }
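/* Point the halted CPU's program counter at the firmware entry point,
 * retrying a few times until the write sticks.
 */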
  2996. /* tp->lock is held. */
  2997. static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
  2998. {
  2999. int i;
  3000. const int iters = 5;
  3001. tw32(cpu_base + CPU_STATE, 0xffffffff);
  3002. tw32_f(cpu_base + CPU_PC, pc);
  3003. for (i = 0; i < iters; i++) {
  3004. if (tr32(cpu_base + CPU_PC) == pc)
  3005. break;
  3006. tw32(cpu_base + CPU_STATE, 0xffffffff);
  3007. tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
  3008. tw32_f(cpu_base + CPU_PC, pc);
  3009. udelay(1000);
  3010. }
  3011. return (i == iters) ? -EBUSY : 0;
  3012. }
  3013. /* tp->lock is held. */
  3014. static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
  3015. {
  3016. const struct tg3_firmware_hdr *fw_hdr;
  3017. int err;
  3018. fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
  3019. /* Firmware blob starts with version numbers, followed by
  3020. start address and length. We are setting complete length.
  3021. length = end_address_of_bss - start_address_of_text.
  3022. Remainder is the blob to be loaded contiguously
  3023. from start address. */
  3024. err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
  3025. RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
  3026. fw_hdr);
  3027. if (err)
  3028. return err;
  3029. err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
  3030. TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
  3031. fw_hdr);
  3032. if (err)
  3033. return err;
  3034. /* Now startup only the RX cpu. */
  3035. err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
  3036. be32_to_cpu(fw_hdr->base_addr));
  3037. if (err) {
  3038. netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
  3039. "should be %08x\n", __func__,
  3040. tr32(RX_CPU_BASE + CPU_PC),
  3041. be32_to_cpu(fw_hdr->base_addr));
  3042. return -ENODEV;
  3043. }
  3044. tg3_rxcpu_resume(tp);
  3045. return 0;
  3046. }
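/* Check that the 57766 boot code has reached its service loop and that no
 * other patch has already been downloaded.
 */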
  3047. static int tg3_validate_rxcpu_state(struct tg3 *tp)
  3048. {
  3049. const int iters = 1000;
  3050. int i;
  3051. u32 val;
  3052. /* Wait for boot code to complete initialization and enter service
  3053. * loop. It is then safe to download service patches
  3054. */
  3055. for (i = 0; i < iters; i++) {
  3056. if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
  3057. break;
  3058. udelay(10);
  3059. }
  3060. if (i == iters) {
  3061. netdev_err(tp->dev, "Boot code not ready for service patches\n");
  3062. return -EBUSY;
  3063. }
  3064. val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
  3065. if (val & 0xff) {
  3066. netdev_warn(tp->dev,
  3067. "Other patches exist. Not downloading EEE patch\n");
  3068. return -EEXIST;
  3069. }
  3070. return 0;
  3071. }
/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below.  The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length.  The
	 * length here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware, i.e. they each have a
	 * firmware header followed by the data for that fragment.  The
	 * version field of the individual fragment header is unused.
	 */
	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
  3104. /* tp->lock is held. */
  3105. static int tg3_load_tso_firmware(struct tg3 *tp)
  3106. {
  3107. const struct tg3_firmware_hdr *fw_hdr;
  3108. unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
  3109. int err;
  3110. if (!tg3_flag(tp, FW_TSO))
  3111. return 0;
  3112. fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
  3113. /* Firmware blob starts with version numbers, followed by
  3114. start address and length. We are setting complete length.
  3115. length = end_address_of_bss - start_address_of_text.
  3116. Remainder is the blob to be loaded contiguously
  3117. from start address. */
  3118. cpu_scratch_size = tp->fw_len;
  3119. if (tg3_asic_rev(tp) == ASIC_REV_5705) {
  3120. cpu_base = RX_CPU_BASE;
  3121. cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
  3122. } else {
  3123. cpu_base = TX_CPU_BASE;
  3124. cpu_scratch_base = TX_CPU_SCRATCH_BASE;
  3125. cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
  3126. }
  3127. err = tg3_load_firmware_cpu(tp, cpu_base,
  3128. cpu_scratch_base, cpu_scratch_size,
  3129. fw_hdr);
  3130. if (err)
  3131. return err;
  3132. /* Now startup the cpu. */
  3133. err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
  3134. be32_to_cpu(fw_hdr->base_addr));
  3135. if (err) {
  3136. netdev_err(tp->dev,
  3137. "%s fails to set CPU PC, is %08x should be %08x\n",
  3138. __func__, tr32(cpu_base + CPU_PC),
  3139. be32_to_cpu(fw_hdr->base_addr));
  3140. return -ENODEV;
  3141. }
  3142. tg3_resume_cpu(tp, cpu_base);
  3143. return 0;
  3144. }
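/* Program the MAC address registers (including the extended ranges on
 * 5703/5704) and derive the transmit backoff seed from the address bytes.
 */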
  3145. /* tp->lock is held. */
  3146. static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
  3147. {
  3148. u32 addr_high, addr_low;
  3149. int i;
  3150. addr_high = ((tp->dev->dev_addr[0] << 8) |
  3151. tp->dev->dev_addr[1]);
  3152. addr_low = ((tp->dev->dev_addr[2] << 24) |
  3153. (tp->dev->dev_addr[3] << 16) |
  3154. (tp->dev->dev_addr[4] << 8) |
  3155. (tp->dev->dev_addr[5] << 0));
  3156. for (i = 0; i < 4; i++) {
  3157. if (i == 1 && skip_mac_1)
  3158. continue;
  3159. tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
  3160. tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
  3161. }
  3162. if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
  3163. tg3_asic_rev(tp) == ASIC_REV_5704) {
  3164. for (i = 0; i < 12; i++) {
  3165. tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
  3166. tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
  3167. }
  3168. }
  3169. addr_high = (tp->dev->dev_addr[0] +
  3170. tp->dev->dev_addr[1] +
  3171. tp->dev->dev_addr[2] +
  3172. tp->dev->dev_addr[3] +
  3173. tp->dev->dev_addr[4] +
  3174. tp->dev->dev_addr[5]) &
  3175. TX_BACKOFF_SEED_MASK;
  3176. tw32(MAC_TX_BACKOFF_SEED, addr_high);
  3177. }
  3178. static void tg3_enable_register_access(struct tg3 *tp)
  3179. {
  3180. /*
  3181. * Make sure register accesses (indirect or otherwise) will function
  3182. * correctly.
  3183. */
  3184. pci_write_config_dword(tp->pdev,
  3185. TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
  3186. }
  3187. static int tg3_power_up(struct tg3 *tp)
  3188. {
  3189. int err;
  3190. tg3_enable_register_access(tp);
  3191. err = pci_set_power_state(tp->pdev, PCI_D0);
  3192. if (!err) {
  3193. /* Switch out of Vaux if it is a NIC */
  3194. tg3_pwrsrc_switch_to_vmain(tp);
  3195. } else {
  3196. netdev_err(tp->dev, "Transition to D0 failed\n");
  3197. }
  3198. return err;
  3199. }
  3200. static int tg3_setup_phy(struct tg3 *, int);
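/* Prepare the device for low power: restrict the PHY advertisement for WoL,
 * arm magic-packet wakeup, gate unused clocks and, if required, power down
 * the PHY and hand off to auxiliary power.
 */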
  3201. static int tg3_power_down_prepare(struct tg3 *tp)
  3202. {
  3203. u32 misc_host_ctrl;
  3204. bool device_should_wake, do_low_power;
  3205. tg3_enable_register_access(tp);
  3206. /* Restore the CLKREQ setting. */
  3207. if (tg3_flag(tp, CLKREQ_BUG))
  3208. pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
  3209. PCI_EXP_LNKCTL_CLKREQ_EN);
  3210. misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
  3211. tw32(TG3PCI_MISC_HOST_CTRL,
  3212. misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
  3213. device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
  3214. tg3_flag(tp, WOL_ENABLE);
  3215. if (tg3_flag(tp, USE_PHYLIB)) {
  3216. do_low_power = false;
  3217. if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
  3218. !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
  3219. struct phy_device *phydev;
  3220. u32 phyid, advertising;
  3221. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  3222. tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
  3223. tp->link_config.speed = phydev->speed;
  3224. tp->link_config.duplex = phydev->duplex;
  3225. tp->link_config.autoneg = phydev->autoneg;
  3226. tp->link_config.advertising = phydev->advertising;
  3227. advertising = ADVERTISED_TP |
  3228. ADVERTISED_Pause |
  3229. ADVERTISED_Autoneg |
  3230. ADVERTISED_10baseT_Half;
  3231. if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
  3232. if (tg3_flag(tp, WOL_SPEED_100MB))
  3233. advertising |=
  3234. ADVERTISED_100baseT_Half |
  3235. ADVERTISED_100baseT_Full |
  3236. ADVERTISED_10baseT_Full;
  3237. else
  3238. advertising |= ADVERTISED_10baseT_Full;
  3239. }
  3240. phydev->advertising = advertising;
  3241. phy_start_aneg(phydev);
  3242. phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
  3243. if (phyid != PHY_ID_BCMAC131) {
  3244. phyid &= PHY_BCM_OUI_MASK;
  3245. if (phyid == PHY_BCM_OUI_1 ||
  3246. phyid == PHY_BCM_OUI_2 ||
  3247. phyid == PHY_BCM_OUI_3)
  3248. do_low_power = true;
  3249. }
  3250. }
  3251. } else {
  3252. do_low_power = true;
  3253. if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
  3254. tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
  3255. if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  3256. tg3_setup_phy(tp, 0);
  3257. }
  3258. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  3259. u32 val;
  3260. val = tr32(GRC_VCPU_EXT_CTRL);
  3261. tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
  3262. } else if (!tg3_flag(tp, ENABLE_ASF)) {
  3263. int i;
  3264. u32 val;
  3265. for (i = 0; i < 200; i++) {
  3266. tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
  3267. if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
  3268. break;
  3269. msleep(1);
  3270. }
  3271. }
  3272. if (tg3_flag(tp, WOL_CAP))
  3273. tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
  3274. WOL_DRV_STATE_SHUTDOWN |
  3275. WOL_DRV_WOL |
  3276. WOL_SET_MAGIC_PKT);
  3277. if (device_should_wake) {
  3278. u32 mac_mode;
  3279. if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
  3280. if (do_low_power &&
  3281. !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
  3282. tg3_phy_auxctl_write(tp,
  3283. MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
  3284. MII_TG3_AUXCTL_PCTL_WOL_EN |
  3285. MII_TG3_AUXCTL_PCTL_100TX_LPWR |
  3286. MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
  3287. udelay(40);
  3288. }
  3289. if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
  3290. mac_mode = MAC_MODE_PORT_MODE_GMII;
  3291. else if (tp->phy_flags &
  3292. TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
  3293. if (tp->link_config.active_speed == SPEED_1000)
  3294. mac_mode = MAC_MODE_PORT_MODE_GMII;
  3295. else
  3296. mac_mode = MAC_MODE_PORT_MODE_MII;
  3297. } else
  3298. mac_mode = MAC_MODE_PORT_MODE_MII;
  3299. mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
  3300. if (tg3_asic_rev(tp) == ASIC_REV_5700) {
  3301. u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
  3302. SPEED_100 : SPEED_10;
  3303. if (tg3_5700_link_polarity(tp, speed))
  3304. mac_mode |= MAC_MODE_LINK_POLARITY;
  3305. else
  3306. mac_mode &= ~MAC_MODE_LINK_POLARITY;
  3307. }
  3308. } else {
  3309. mac_mode = MAC_MODE_PORT_MODE_TBI;
  3310. }
  3311. if (!tg3_flag(tp, 5750_PLUS))
  3312. tw32(MAC_LED_CTRL, tp->led_ctrl);
  3313. mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
  3314. if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
  3315. (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
  3316. mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
  3317. if (tg3_flag(tp, ENABLE_APE))
  3318. mac_mode |= MAC_MODE_APE_TX_EN |
  3319. MAC_MODE_APE_RX_EN |
  3320. MAC_MODE_TDE_ENABLE;
  3321. tw32_f(MAC_MODE, mac_mode);
  3322. udelay(100);
  3323. tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
  3324. udelay(10);
  3325. }
  3326. if (!tg3_flag(tp, WOL_SPEED_100MB) &&
  3327. (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  3328. tg3_asic_rev(tp) == ASIC_REV_5701)) {
  3329. u32 base_val;
  3330. base_val = tp->pci_clock_ctrl;
  3331. base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
  3332. CLOCK_CTRL_TXCLK_DISABLE);
  3333. tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
  3334. CLOCK_CTRL_PWRDOWN_PLL133, 40);
  3335. } else if (tg3_flag(tp, 5780_CLASS) ||
  3336. tg3_flag(tp, CPMU_PRESENT) ||
  3337. tg3_asic_rev(tp) == ASIC_REV_5906) {
  3338. /* do nothing */
  3339. } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
  3340. u32 newbits1, newbits2;
  3341. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  3342. tg3_asic_rev(tp) == ASIC_REV_5701) {
  3343. newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
  3344. CLOCK_CTRL_TXCLK_DISABLE |
  3345. CLOCK_CTRL_ALTCLK);
  3346. newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
  3347. } else if (tg3_flag(tp, 5705_PLUS)) {
  3348. newbits1 = CLOCK_CTRL_625_CORE;
  3349. newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
  3350. } else {
  3351. newbits1 = CLOCK_CTRL_ALTCLK;
  3352. newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
  3353. }
  3354. tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
  3355. 40);
  3356. tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
  3357. 40);
  3358. if (!tg3_flag(tp, 5705_PLUS)) {
  3359. u32 newbits3;
  3360. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  3361. tg3_asic_rev(tp) == ASIC_REV_5701) {
  3362. newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
  3363. CLOCK_CTRL_TXCLK_DISABLE |
  3364. CLOCK_CTRL_44MHZ_CORE);
  3365. } else {
  3366. newbits3 = CLOCK_CTRL_44MHZ_CORE;
  3367. }
  3368. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  3369. tp->pci_clock_ctrl | newbits3, 40);
  3370. }
  3371. }
  3372. if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
  3373. tg3_power_down_phy(tp, do_low_power);
  3374. tg3_frob_aux_power(tp, true);
  3375. /* Workaround for unstable PLL clock */
  3376. if ((!tg3_flag(tp, IS_SSB_CORE)) &&
  3377. ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
  3378. (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
  3379. u32 val = tr32(0x7d00);
  3380. val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
  3381. tw32(0x7d00, val);
  3382. if (!tg3_flag(tp, ENABLE_ASF)) {
  3383. int err;
  3384. err = tg3_nvram_lock(tp);
  3385. tg3_halt_cpu(tp, RX_CPU_BASE);
  3386. if (!err)
  3387. tg3_nvram_unlock(tp);
  3388. }
  3389. }
  3390. tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
  3391. return 0;
  3392. }
  3393. static void tg3_power_down(struct tg3 *tp)
  3394. {
  3395. tg3_power_down_prepare(tp);
  3396. pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
  3397. pci_set_power_state(tp->pdev, PCI_D3hot);
  3398. }
  3399. static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
  3400. {
  3401. switch (val & MII_TG3_AUX_STAT_SPDMASK) {
  3402. case MII_TG3_AUX_STAT_10HALF:
  3403. *speed = SPEED_10;
  3404. *duplex = DUPLEX_HALF;
  3405. break;
  3406. case MII_TG3_AUX_STAT_10FULL:
  3407. *speed = SPEED_10;
  3408. *duplex = DUPLEX_FULL;
  3409. break;
  3410. case MII_TG3_AUX_STAT_100HALF:
  3411. *speed = SPEED_100;
  3412. *duplex = DUPLEX_HALF;
  3413. break;
  3414. case MII_TG3_AUX_STAT_100FULL:
  3415. *speed = SPEED_100;
  3416. *duplex = DUPLEX_FULL;
  3417. break;
  3418. case MII_TG3_AUX_STAT_1000HALF:
  3419. *speed = SPEED_1000;
  3420. *duplex = DUPLEX_HALF;
  3421. break;
  3422. case MII_TG3_AUX_STAT_1000FULL:
  3423. *speed = SPEED_1000;
  3424. *duplex = DUPLEX_FULL;
  3425. break;
  3426. default:
  3427. if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
  3428. *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
  3429. SPEED_10;
  3430. *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
  3431. DUPLEX_HALF;
  3432. break;
  3433. }
  3434. *speed = SPEED_UNKNOWN;
  3435. *duplex = DUPLEX_UNKNOWN;
  3436. break;
  3437. }
  3438. }
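/* Program the MII and 1000BASE-T advertisement registers, plus the EEE
 * advertisement on chips that support it.
 */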
  3439. static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
  3440. {
  3441. int err = 0;
  3442. u32 val, new_adv;
  3443. new_adv = ADVERTISE_CSMA;
  3444. new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
  3445. new_adv |= mii_advertise_flowctrl(flowctrl);
  3446. err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
  3447. if (err)
  3448. goto done;
  3449. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
  3450. new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
  3451. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
  3452. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
  3453. new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
  3454. err = tg3_writephy(tp, MII_CTRL1000, new_adv);
  3455. if (err)
  3456. goto done;
  3457. }
  3458. if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
  3459. goto done;
  3460. tw32(TG3_CPMU_EEE_MODE,
  3461. tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
  3462. err = tg3_phy_toggle_auxctl_smdsp(tp, true);
  3463. if (!err) {
  3464. u32 err2;
  3465. val = 0;
  3466. /* Advertise 100-BaseTX EEE ability */
  3467. if (advertise & ADVERTISED_100baseT_Full)
  3468. val |= MDIO_AN_EEE_ADV_100TX;
  3469. /* Advertise 1000-BaseT EEE ability */
  3470. if (advertise & ADVERTISED_1000baseT_Full)
  3471. val |= MDIO_AN_EEE_ADV_1000T;
  3472. err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
  3473. if (err)
  3474. val = 0;
  3475. switch (tg3_asic_rev(tp)) {
  3476. case ASIC_REV_5717:
  3477. case ASIC_REV_57765:
  3478. case ASIC_REV_57766:
  3479. case ASIC_REV_5719:
  3480. /* If we advertised any eee advertisements above... */
  3481. if (val)
  3482. val = MII_TG3_DSP_TAP26_ALNOKO |
  3483. MII_TG3_DSP_TAP26_RMRXSTO |
  3484. MII_TG3_DSP_TAP26_OPCSINPT;
  3485. tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
  3486. /* Fall through */
  3487. case ASIC_REV_5720:
  3488. case ASIC_REV_5762:
  3489. if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
  3490. tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
  3491. MII_TG3_DSP_CH34TP2_HIBW01);
  3492. }
  3493. err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
  3494. if (!err)
  3495. err = err2;
  3496. }
  3497. done:
  3498. return err;
  3499. }
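/* Begin copper link setup: restart autonegotiation with the appropriate
 * advertisement, or force speed/duplex through BMCR when autoneg is disabled.
 */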
  3500. static void tg3_phy_copper_begin(struct tg3 *tp)
  3501. {
  3502. if (tp->link_config.autoneg == AUTONEG_ENABLE ||
  3503. (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
  3504. u32 adv, fc;
  3505. if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
  3506. !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
  3507. adv = ADVERTISED_10baseT_Half |
  3508. ADVERTISED_10baseT_Full;
  3509. if (tg3_flag(tp, WOL_SPEED_100MB))
  3510. adv |= ADVERTISED_100baseT_Half |
  3511. ADVERTISED_100baseT_Full;
  3512. if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
  3513. adv |= ADVERTISED_1000baseT_Half |
  3514. ADVERTISED_1000baseT_Full;
  3515. fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
  3516. } else {
  3517. adv = tp->link_config.advertising;
  3518. if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
  3519. adv &= ~(ADVERTISED_1000baseT_Half |
  3520. ADVERTISED_1000baseT_Full);
  3521. fc = tp->link_config.flowctrl;
  3522. }
  3523. tg3_phy_autoneg_cfg(tp, adv, fc);
  3524. if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
  3525. (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
  3526. /* Normally during power down we want to autonegotiate
  3527. * the lowest possible speed for WOL. However, to avoid
  3528. * link flap, we leave it untouched.
  3529. */
  3530. return;
  3531. }
  3532. tg3_writephy(tp, MII_BMCR,
  3533. BMCR_ANENABLE | BMCR_ANRESTART);
  3534. } else {
  3535. int i;
  3536. u32 bmcr, orig_bmcr;
  3537. tp->link_config.active_speed = tp->link_config.speed;
  3538. tp->link_config.active_duplex = tp->link_config.duplex;
  3539. if (tg3_asic_rev(tp) == ASIC_REV_5714) {
  3540. /* With autoneg disabled, 5715 only links up when the
  3541. * advertisement register has the configured speed
  3542. * enabled.
  3543. */
  3544. tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
  3545. }
  3546. bmcr = 0;
  3547. switch (tp->link_config.speed) {
  3548. default:
  3549. case SPEED_10:
  3550. break;
  3551. case SPEED_100:
  3552. bmcr |= BMCR_SPEED100;
  3553. break;
  3554. case SPEED_1000:
  3555. bmcr |= BMCR_SPEED1000;
  3556. break;
  3557. }
  3558. if (tp->link_config.duplex == DUPLEX_FULL)
  3559. bmcr |= BMCR_FULLDPLX;
  3560. if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
  3561. (bmcr != orig_bmcr)) {
  3562. tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
  3563. for (i = 0; i < 1500; i++) {
  3564. u32 tmp;
  3565. udelay(10);
  3566. if (tg3_readphy(tp, MII_BMSR, &tmp) ||
  3567. tg3_readphy(tp, MII_BMSR, &tmp))
  3568. continue;
  3569. if (!(tmp & BMSR_LSTATUS)) {
  3570. udelay(40);
  3571. break;
  3572. }
  3573. }
  3574. tg3_writephy(tp, MII_BMCR, bmcr);
  3575. udelay(40);
  3576. }
  3577. }
  3578. }
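/* Read the PHY's current configuration back into tp->link_config. */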
  3579. static int tg3_phy_pull_config(struct tg3 *tp)
  3580. {
  3581. int err;
  3582. u32 val;
  3583. err = tg3_readphy(tp, MII_BMCR, &val);
  3584. if (err)
  3585. goto done;
  3586. if (!(val & BMCR_ANENABLE)) {
  3587. tp->link_config.autoneg = AUTONEG_DISABLE;
  3588. tp->link_config.advertising = 0;
  3589. tg3_flag_clear(tp, PAUSE_AUTONEG);
  3590. err = -EIO;
  3591. switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
  3592. case 0:
  3593. if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
  3594. goto done;
  3595. tp->link_config.speed = SPEED_10;
  3596. break;
  3597. case BMCR_SPEED100:
  3598. if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
  3599. goto done;
  3600. tp->link_config.speed = SPEED_100;
  3601. break;
  3602. case BMCR_SPEED1000:
  3603. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
  3604. tp->link_config.speed = SPEED_1000;
  3605. break;
  3606. }
  3607. /* Fall through */
  3608. default:
  3609. goto done;
  3610. }
  3611. if (val & BMCR_FULLDPLX)
  3612. tp->link_config.duplex = DUPLEX_FULL;
  3613. else
  3614. tp->link_config.duplex = DUPLEX_HALF;
  3615. tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
  3616. err = 0;
  3617. goto done;
  3618. }
  3619. tp->link_config.autoneg = AUTONEG_ENABLE;
  3620. tp->link_config.advertising = ADVERTISED_Autoneg;
  3621. tg3_flag_set(tp, PAUSE_AUTONEG);
  3622. if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
  3623. u32 adv;
  3624. err = tg3_readphy(tp, MII_ADVERTISE, &val);
  3625. if (err)
  3626. goto done;
  3627. adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
  3628. tp->link_config.advertising |= adv | ADVERTISED_TP;
  3629. tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
  3630. } else {
  3631. tp->link_config.advertising |= ADVERTISED_FIBRE;
  3632. }
  3633. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
  3634. u32 adv;
  3635. if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
  3636. err = tg3_readphy(tp, MII_CTRL1000, &val);
  3637. if (err)
  3638. goto done;
  3639. adv = mii_ctrl1000_to_ethtool_adv_t(val);
  3640. } else {
  3641. err = tg3_readphy(tp, MII_ADVERTISE, &val);
  3642. if (err)
  3643. goto done;
  3644. adv = tg3_decode_flowctrl_1000X(val);
  3645. tp->link_config.flowctrl = adv;
  3646. val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
  3647. adv = mii_adv_to_ethtool_adv_x(val);
  3648. }
  3649. tp->link_config.advertising |= adv;
  3650. }
  3651. done:
  3652. return err;
  3653. }
  3654. static int tg3_init_5401phy_dsp(struct tg3 *tp)
  3655. {
  3656. int err;
  3657. /* Turn off tap power management. */
  3658. /* Set Extended packet length bit */
  3659. err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
  3660. err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
  3661. err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
  3662. err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
  3663. err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
  3664. err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
  3665. udelay(40);
  3666. return err;
  3667. }
  3668. static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
  3669. {
  3670. u32 advmsk, tgtadv, advertising;
  3671. advertising = tp->link_config.advertising;
  3672. tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
  3673. advmsk = ADVERTISE_ALL;
  3674. if (tp->link_config.active_duplex == DUPLEX_FULL) {
  3675. tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
  3676. advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  3677. }
  3678. if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
  3679. return false;
  3680. if ((*lcladv & advmsk) != tgtadv)
  3681. return false;
  3682. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
  3683. u32 tg3_ctrl;
  3684. tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
  3685. if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
  3686. return false;
  3687. if (tgtadv &&
  3688. (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
  3689. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
  3690. tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
  3691. tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
  3692. CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
  3693. } else {
  3694. tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
  3695. }
  3696. if (tg3_ctrl != tgtadv)
  3697. return false;
  3698. }
  3699. return true;
  3700. }
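/* Fetch the link partner's advertisement (MII_LPA and MII_STAT1000) into
 * tp->link_config.rmt_adv.
 */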
  3701. static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
  3702. {
  3703. u32 lpeth = 0;
  3704. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
  3705. u32 val;
  3706. if (tg3_readphy(tp, MII_STAT1000, &val))
  3707. return false;
  3708. lpeth = mii_stat1000_to_ethtool_lpa_t(val);
  3709. }
  3710. if (tg3_readphy(tp, MII_LPA, rmtadv))
  3711. return false;
  3712. lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
  3713. tp->link_config.rmt_adv = lpeth;
  3714. return true;
  3715. }
  3716. static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
  3717. {
  3718. if (curr_link_up != tp->link_up) {
  3719. if (curr_link_up) {
  3720. netif_carrier_on(tp->dev);
  3721. } else {
  3722. netif_carrier_off(tp->dev);
  3723. if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
  3724. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  3725. }
  3726. tg3_link_report(tp);
  3727. return true;
  3728. }
  3729. return false;
  3730. }
  3731. static void tg3_clear_mac_status(struct tg3 *tp)
  3732. {
  3733. tw32(MAC_EVENT, 0);
  3734. tw32_f(MAC_STATUS,
  3735. MAC_STATUS_SYNC_CHANGED |
  3736. MAC_STATUS_CFG_CHANGED |
  3737. MAC_STATUS_MI_COMPLETION |
  3738. MAC_STATUS_LNKSTATE_CHANGED);
  3739. udelay(40);
  3740. }
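/* Bring up (or re-evaluate) the copper link: poll the PHY, determine speed
 * and duplex, and program MAC_MODE and flow control to match.
 */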
  3741. static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
  3742. {
  3743. int current_link_up;
  3744. u32 bmsr, val;
  3745. u32 lcl_adv, rmt_adv;
  3746. u16 current_speed;
  3747. u8 current_duplex;
  3748. int i, err;
  3749. tg3_clear_mac_status(tp);
  3750. if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  3751. tw32_f(MAC_MI_MODE,
  3752. (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
  3753. udelay(80);
  3754. }
  3755. tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
  3756. /* Some third-party PHYs need to be reset on link going
  3757. * down.
  3758. */
  3759. if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
  3760. tg3_asic_rev(tp) == ASIC_REV_5704 ||
  3761. tg3_asic_rev(tp) == ASIC_REV_5705) &&
  3762. tp->link_up) {
  3763. tg3_readphy(tp, MII_BMSR, &bmsr);
  3764. if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
  3765. !(bmsr & BMSR_LSTATUS))
  3766. force_reset = 1;
  3767. }
  3768. if (force_reset)
  3769. tg3_phy_reset(tp);
  3770. if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
  3771. tg3_readphy(tp, MII_BMSR, &bmsr);
  3772. if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
  3773. !tg3_flag(tp, INIT_COMPLETE))
  3774. bmsr = 0;
  3775. if (!(bmsr & BMSR_LSTATUS)) {
  3776. err = tg3_init_5401phy_dsp(tp);
  3777. if (err)
  3778. return err;
  3779. tg3_readphy(tp, MII_BMSR, &bmsr);
  3780. for (i = 0; i < 1000; i++) {
  3781. udelay(10);
  3782. if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
  3783. (bmsr & BMSR_LSTATUS)) {
  3784. udelay(40);
  3785. break;
  3786. }
  3787. }
  3788. if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
  3789. TG3_PHY_REV_BCM5401_B0 &&
  3790. !(bmsr & BMSR_LSTATUS) &&
  3791. tp->link_config.active_speed == SPEED_1000) {
  3792. err = tg3_phy_reset(tp);
  3793. if (!err)
  3794. err = tg3_init_5401phy_dsp(tp);
  3795. if (err)
  3796. return err;
  3797. }
  3798. }
  3799. } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
  3800. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
  3801. /* 5701 {A0,B0} CRC bug workaround */
  3802. tg3_writephy(tp, 0x15, 0x0a75);
  3803. tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
  3804. tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
  3805. tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
  3806. }
  3807. /* Clear pending interrupts... */
  3808. tg3_readphy(tp, MII_TG3_ISTAT, &val);
  3809. tg3_readphy(tp, MII_TG3_ISTAT, &val);
  3810. if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
  3811. tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
  3812. else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
  3813. tg3_writephy(tp, MII_TG3_IMASK, ~0);
  3814. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  3815. tg3_asic_rev(tp) == ASIC_REV_5701) {
  3816. if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
  3817. tg3_writephy(tp, MII_TG3_EXT_CTRL,
  3818. MII_TG3_EXT_CTRL_LNK3_LED_MODE);
  3819. else
  3820. tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
  3821. }
  3822. current_link_up = 0;
  3823. current_speed = SPEED_UNKNOWN;
  3824. current_duplex = DUPLEX_UNKNOWN;
  3825. tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
  3826. tp->link_config.rmt_adv = 0;
  3827. if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
  3828. err = tg3_phy_auxctl_read(tp,
  3829. MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
  3830. &val);
  3831. if (!err && !(val & (1 << 10))) {
  3832. tg3_phy_auxctl_write(tp,
  3833. MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
  3834. val | (1 << 10));
  3835. goto relink;
  3836. }
  3837. }
  3838. bmsr = 0;
  3839. for (i = 0; i < 100; i++) {
  3840. tg3_readphy(tp, MII_BMSR, &bmsr);
  3841. if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
  3842. (bmsr & BMSR_LSTATUS))
  3843. break;
  3844. udelay(40);
  3845. }
  3846. if (bmsr & BMSR_LSTATUS) {
  3847. u32 aux_stat, bmcr;
  3848. tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
  3849. for (i = 0; i < 2000; i++) {
  3850. udelay(10);
  3851. if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
  3852. aux_stat)
  3853. break;
  3854. }
  3855. tg3_aux_stat_to_speed_duplex(tp, aux_stat,
  3856. &current_speed,
  3857. &current_duplex);
  3858. bmcr = 0;
  3859. for (i = 0; i < 200; i++) {
  3860. tg3_readphy(tp, MII_BMCR, &bmcr);
  3861. if (tg3_readphy(tp, MII_BMCR, &bmcr))
  3862. continue;
  3863. if (bmcr && bmcr != 0x7fff)
  3864. break;
  3865. udelay(10);
  3866. }
  3867. lcl_adv = 0;
  3868. rmt_adv = 0;
  3869. tp->link_config.active_speed = current_speed;
  3870. tp->link_config.active_duplex = current_duplex;
  3871. if (tp->link_config.autoneg == AUTONEG_ENABLE) {
  3872. if ((bmcr & BMCR_ANENABLE) &&
  3873. tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
  3874. tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
  3875. current_link_up = 1;
  3876. } else {
  3877. if (!(bmcr & BMCR_ANENABLE) &&
  3878. tp->link_config.speed == current_speed &&
  3879. tp->link_config.duplex == current_duplex) {
  3880. current_link_up = 1;
  3881. }
  3882. }
  3883. if (current_link_up == 1 &&
  3884. tp->link_config.active_duplex == DUPLEX_FULL) {
  3885. u32 reg, bit;
  3886. if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
  3887. reg = MII_TG3_FET_GEN_STAT;
  3888. bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
  3889. } else {
  3890. reg = MII_TG3_EXT_STAT;
  3891. bit = MII_TG3_EXT_STAT_MDIX;
  3892. }
  3893. if (!tg3_readphy(tp, reg, &val) && (val & bit))
  3894. tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
  3895. tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
  3896. }
  3897. }
  3898. relink:
  3899. if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
  3900. tg3_phy_copper_begin(tp);
  3901. if (tg3_flag(tp, ROBOSWITCH)) {
  3902. current_link_up = 1;
  3903. /* FIXME: when BCM5325 switch is used use 100 MBit/s */
  3904. current_speed = SPEED_1000;
  3905. current_duplex = DUPLEX_FULL;
  3906. tp->link_config.active_speed = current_speed;
  3907. tp->link_config.active_duplex = current_duplex;
  3908. }
  3909. tg3_readphy(tp, MII_BMSR, &bmsr);
  3910. if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
  3911. (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
  3912. current_link_up = 1;
  3913. }
  3914. tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
  3915. if (current_link_up == 1) {
  3916. if (tp->link_config.active_speed == SPEED_100 ||
  3917. tp->link_config.active_speed == SPEED_10)
  3918. tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
  3919. else
  3920. tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
  3921. } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
  3922. tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
  3923. else
  3924. tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
  3925. /* In order for the 5750 core in BCM4785 chip to work properly
  3926. * in RGMII mode, the Led Control Register must be set up.
  3927. */
  3928. if (tg3_flag(tp, RGMII_MODE)) {
  3929. u32 led_ctrl = tr32(MAC_LED_CTRL);
  3930. led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
  3931. if (tp->link_config.active_speed == SPEED_10)
  3932. led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
  3933. else if (tp->link_config.active_speed == SPEED_100)
  3934. led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
  3935. LED_CTRL_100MBPS_ON);
  3936. else if (tp->link_config.active_speed == SPEED_1000)
  3937. led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
  3938. LED_CTRL_1000MBPS_ON);
  3939. tw32(MAC_LED_CTRL, led_ctrl);
  3940. udelay(40);
  3941. }
  3942. tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
  3943. if (tp->link_config.active_duplex == DUPLEX_HALF)
  3944. tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
  3945. if (tg3_asic_rev(tp) == ASIC_REV_5700) {
  3946. if (current_link_up == 1 &&
  3947. tg3_5700_link_polarity(tp, tp->link_config.active_speed))
  3948. tp->mac_mode |= MAC_MODE_LINK_POLARITY;
  3949. else
  3950. tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
  3951. }
  3952. /* ??? Without this setting Netgear GA302T PHY does not
  3953. * ??? send/receive packets...
  3954. */
  3955. if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
  3956. tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
  3957. tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
  3958. tw32_f(MAC_MI_MODE, tp->mi_mode);
  3959. udelay(80);
  3960. }
  3961. tw32_f(MAC_MODE, tp->mac_mode);
  3962. udelay(40);
  3963. tg3_phy_eee_adjust(tp, current_link_up);
  3964. if (tg3_flag(tp, USE_LINKCHG_REG)) {
  3965. /* Polled via timer. */
  3966. tw32_f(MAC_EVENT, 0);
  3967. } else {
  3968. tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
  3969. }
  3970. udelay(40);
  3971. if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
  3972. current_link_up == 1 &&
  3973. tp->link_config.active_speed == SPEED_1000 &&
  3974. (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
  3975. udelay(120);
  3976. tw32_f(MAC_STATUS,
  3977. (MAC_STATUS_SYNC_CHANGED |
  3978. MAC_STATUS_CFG_CHANGED));
  3979. udelay(40);
  3980. tg3_write_mem(tp,
  3981. NIC_SRAM_FIRMWARE_MBOX,
  3982. NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
  3983. }
  3984. /* Prevent send BD corruption. */
  3985. if (tg3_flag(tp, CLKREQ_BUG)) {
  3986. if (tp->link_config.active_speed == SPEED_100 ||
  3987. tp->link_config.active_speed == SPEED_10)
  3988. pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
  3989. PCI_EXP_LNKCTL_CLKREQ_EN);
  3990. else
  3991. pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
  3992. PCI_EXP_LNKCTL_CLKREQ_EN);
  3993. }
  3994. tg3_test_and_report_link_chg(tp, current_link_up);
  3995. return 0;
  3996. }
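/* State tracking for the software 1000BASE-X autonegotiation state machine
 * used on fiber links.
 */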
  3997. struct tg3_fiber_aneginfo {
  3998. int state;
  3999. #define ANEG_STATE_UNKNOWN 0
  4000. #define ANEG_STATE_AN_ENABLE 1
  4001. #define ANEG_STATE_RESTART_INIT 2
  4002. #define ANEG_STATE_RESTART 3
  4003. #define ANEG_STATE_DISABLE_LINK_OK 4
  4004. #define ANEG_STATE_ABILITY_DETECT_INIT 5
  4005. #define ANEG_STATE_ABILITY_DETECT 6
  4006. #define ANEG_STATE_ACK_DETECT_INIT 7
  4007. #define ANEG_STATE_ACK_DETECT 8
  4008. #define ANEG_STATE_COMPLETE_ACK_INIT 9
  4009. #define ANEG_STATE_COMPLETE_ACK 10
  4010. #define ANEG_STATE_IDLE_DETECT_INIT 11
  4011. #define ANEG_STATE_IDLE_DETECT 12
  4012. #define ANEG_STATE_LINK_OK 13
  4013. #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
  4014. #define ANEG_STATE_NEXT_PAGE_WAIT 15
  4015. u32 flags;
  4016. #define MR_AN_ENABLE 0x00000001
  4017. #define MR_RESTART_AN 0x00000002
  4018. #define MR_AN_COMPLETE 0x00000004
  4019. #define MR_PAGE_RX 0x00000008
  4020. #define MR_NP_LOADED 0x00000010
  4021. #define MR_TOGGLE_TX 0x00000020
  4022. #define MR_LP_ADV_FULL_DUPLEX 0x00000040
  4023. #define MR_LP_ADV_HALF_DUPLEX 0x00000080
  4024. #define MR_LP_ADV_SYM_PAUSE 0x00000100
  4025. #define MR_LP_ADV_ASYM_PAUSE 0x00000200
  4026. #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
  4027. #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
  4028. #define MR_LP_ADV_NEXT_PAGE 0x00001000
  4029. #define MR_TOGGLE_RX 0x00002000
  4030. #define MR_NP_RX 0x00004000
  4031. #define MR_LINK_OK 0x80000000
  4032. unsigned long link_time, cur_time;
  4033. u32 ability_match_cfg;
  4034. int ability_match_count;
  4035. char ability_match, idle_match, ack_match;
  4036. u32 txconfig, rxconfig;
  4037. #define ANEG_CFG_NP 0x00000080
  4038. #define ANEG_CFG_ACK 0x00000040
  4039. #define ANEG_CFG_RF2 0x00000020
  4040. #define ANEG_CFG_RF1 0x00000010
  4041. #define ANEG_CFG_PS2 0x00000001
  4042. #define ANEG_CFG_PS1 0x00008000
  4043. #define ANEG_CFG_HD 0x00004000
  4044. #define ANEG_CFG_FD 0x00002000
  4045. #define ANEG_CFG_INVAL 0x00001f06
  4046. };
  4047. #define ANEG_OK 0
  4048. #define ANEG_DONE 1
  4049. #define ANEG_TIMER_ENAB 2
  4050. #define ANEG_FAILED -1
  4051. #define ANEG_STATE_SETTLE_TIME 10000
  4052. static int tg3_fiber_aneg_smachine(struct tg3 *tp,
  4053. struct tg3_fiber_aneginfo *ap)
  4054. {
  4055. u16 flowctrl;
  4056. unsigned long delta;
  4057. u32 rx_cfg_reg;
  4058. int ret;
  4059. if (ap->state == ANEG_STATE_UNKNOWN) {
  4060. ap->rxconfig = 0;
  4061. ap->link_time = 0;
  4062. ap->cur_time = 0;
  4063. ap->ability_match_cfg = 0;
  4064. ap->ability_match_count = 0;
  4065. ap->ability_match = 0;
  4066. ap->idle_match = 0;
  4067. ap->ack_match = 0;
  4068. }
  4069. ap->cur_time++;
  4070. if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
  4071. rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
  4072. if (rx_cfg_reg != ap->ability_match_cfg) {
  4073. ap->ability_match_cfg = rx_cfg_reg;
  4074. ap->ability_match = 0;
  4075. ap->ability_match_count = 0;
  4076. } else {
  4077. if (++ap->ability_match_count > 1) {
  4078. ap->ability_match = 1;
  4079. ap->ability_match_cfg = rx_cfg_reg;
  4080. }
  4081. }
  4082. if (rx_cfg_reg & ANEG_CFG_ACK)
  4083. ap->ack_match = 1;
  4084. else
  4085. ap->ack_match = 0;
  4086. ap->idle_match = 0;
  4087. } else {
  4088. ap->idle_match = 1;
  4089. ap->ability_match_cfg = 0;
  4090. ap->ability_match_count = 0;
  4091. ap->ability_match = 0;
  4092. ap->ack_match = 0;
  4093. rx_cfg_reg = 0;
  4094. }
  4095. ap->rxconfig = rx_cfg_reg;
  4096. ret = ANEG_OK;
  4097. switch (ap->state) {
  4098. case ANEG_STATE_UNKNOWN:
  4099. if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
  4100. ap->state = ANEG_STATE_AN_ENABLE;
  4101. /* fallthru */
  4102. case ANEG_STATE_AN_ENABLE:
  4103. ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
  4104. if (ap->flags & MR_AN_ENABLE) {
  4105. ap->link_time = 0;
  4106. ap->cur_time = 0;
  4107. ap->ability_match_cfg = 0;
  4108. ap->ability_match_count = 0;
  4109. ap->ability_match = 0;
  4110. ap->idle_match = 0;
  4111. ap->ack_match = 0;
  4112. ap->state = ANEG_STATE_RESTART_INIT;
  4113. } else {
  4114. ap->state = ANEG_STATE_DISABLE_LINK_OK;
  4115. }
  4116. break;
  4117. case ANEG_STATE_RESTART_INIT:
  4118. ap->link_time = ap->cur_time;
  4119. ap->flags &= ~(MR_NP_LOADED);
  4120. ap->txconfig = 0;
  4121. tw32(MAC_TX_AUTO_NEG, 0);
  4122. tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
  4123. tw32_f(MAC_MODE, tp->mac_mode);
  4124. udelay(40);
  4125. ret = ANEG_TIMER_ENAB;
  4126. ap->state = ANEG_STATE_RESTART;
  4127. /* fallthru */
  4128. case ANEG_STATE_RESTART:
  4129. delta = ap->cur_time - ap->link_time;
  4130. if (delta > ANEG_STATE_SETTLE_TIME)
  4131. ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
  4132. else
  4133. ret = ANEG_TIMER_ENAB;
  4134. break;
  4135. case ANEG_STATE_DISABLE_LINK_OK:
  4136. ret = ANEG_DONE;
  4137. break;
  4138. case ANEG_STATE_ABILITY_DETECT_INIT:
  4139. ap->flags &= ~(MR_TOGGLE_TX);
  4140. ap->txconfig = ANEG_CFG_FD;
  4141. flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
  4142. if (flowctrl & ADVERTISE_1000XPAUSE)
  4143. ap->txconfig |= ANEG_CFG_PS1;
  4144. if (flowctrl & ADVERTISE_1000XPSE_ASYM)
  4145. ap->txconfig |= ANEG_CFG_PS2;
  4146. tw32(MAC_TX_AUTO_NEG, ap->txconfig);
  4147. tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
  4148. tw32_f(MAC_MODE, tp->mac_mode);
  4149. udelay(40);
  4150. ap->state = ANEG_STATE_ABILITY_DETECT;
  4151. break;
  4152. case ANEG_STATE_ABILITY_DETECT:
  4153. if (ap->ability_match != 0 && ap->rxconfig != 0)
  4154. ap->state = ANEG_STATE_ACK_DETECT_INIT;
  4155. break;
  4156. case ANEG_STATE_ACK_DETECT_INIT:
  4157. ap->txconfig |= ANEG_CFG_ACK;
  4158. tw32(MAC_TX_AUTO_NEG, ap->txconfig);
  4159. tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
  4160. tw32_f(MAC_MODE, tp->mac_mode);
  4161. udelay(40);
  4162. ap->state = ANEG_STATE_ACK_DETECT;
  4163. /* fallthru */
  4164. case ANEG_STATE_ACK_DETECT:
  4165. if (ap->ack_match != 0) {
  4166. if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
  4167. (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
  4168. ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
  4169. } else {
  4170. ap->state = ANEG_STATE_AN_ENABLE;
  4171. }
  4172. } else if (ap->ability_match != 0 &&
  4173. ap->rxconfig == 0) {
  4174. ap->state = ANEG_STATE_AN_ENABLE;
  4175. }
  4176. break;
  4177. case ANEG_STATE_COMPLETE_ACK_INIT:
  4178. if (ap->rxconfig & ANEG_CFG_INVAL) {
  4179. ret = ANEG_FAILED;
  4180. break;
  4181. }
  4182. ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
  4183. MR_LP_ADV_HALF_DUPLEX |
  4184. MR_LP_ADV_SYM_PAUSE |
  4185. MR_LP_ADV_ASYM_PAUSE |
  4186. MR_LP_ADV_REMOTE_FAULT1 |
  4187. MR_LP_ADV_REMOTE_FAULT2 |
  4188. MR_LP_ADV_NEXT_PAGE |
  4189. MR_TOGGLE_RX |
  4190. MR_NP_RX);
  4191. if (ap->rxconfig & ANEG_CFG_FD)
  4192. ap->flags |= MR_LP_ADV_FULL_DUPLEX;
  4193. if (ap->rxconfig & ANEG_CFG_HD)
  4194. ap->flags |= MR_LP_ADV_HALF_DUPLEX;
  4195. if (ap->rxconfig & ANEG_CFG_PS1)
  4196. ap->flags |= MR_LP_ADV_SYM_PAUSE;
  4197. if (ap->rxconfig & ANEG_CFG_PS2)
  4198. ap->flags |= MR_LP_ADV_ASYM_PAUSE;
  4199. if (ap->rxconfig & ANEG_CFG_RF1)
  4200. ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
  4201. if (ap->rxconfig & ANEG_CFG_RF2)
  4202. ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
  4203. if (ap->rxconfig & ANEG_CFG_NP)
  4204. ap->flags |= MR_LP_ADV_NEXT_PAGE;
  4205. ap->link_time = ap->cur_time;
  4206. ap->flags ^= (MR_TOGGLE_TX);
  4207. if (ap->rxconfig & 0x0008)
  4208. ap->flags |= MR_TOGGLE_RX;
  4209. if (ap->rxconfig & ANEG_CFG_NP)
  4210. ap->flags |= MR_NP_RX;
  4211. ap->flags |= MR_PAGE_RX;
  4212. ap->state = ANEG_STATE_COMPLETE_ACK;
  4213. ret = ANEG_TIMER_ENAB;
  4214. break;
  4215. case ANEG_STATE_COMPLETE_ACK:
  4216. if (ap->ability_match != 0 &&
  4217. ap->rxconfig == 0) {
  4218. ap->state = ANEG_STATE_AN_ENABLE;
  4219. break;
  4220. }
  4221. delta = ap->cur_time - ap->link_time;
  4222. if (delta > ANEG_STATE_SETTLE_TIME) {
  4223. if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
  4224. ap->state = ANEG_STATE_IDLE_DETECT_INIT;
  4225. } else {
  4226. if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
  4227. !(ap->flags & MR_NP_RX)) {
  4228. ap->state = ANEG_STATE_IDLE_DETECT_INIT;
  4229. } else {
  4230. ret = ANEG_FAILED;
  4231. }
  4232. }
  4233. }
  4234. break;
  4235. case ANEG_STATE_IDLE_DETECT_INIT:
  4236. ap->link_time = ap->cur_time;
  4237. tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
  4238. tw32_f(MAC_MODE, tp->mac_mode);
  4239. udelay(40);
  4240. ap->state = ANEG_STATE_IDLE_DETECT;
  4241. ret = ANEG_TIMER_ENAB;
  4242. break;
  4243. case ANEG_STATE_IDLE_DETECT:
  4244. if (ap->ability_match != 0 &&
  4245. ap->rxconfig == 0) {
  4246. ap->state = ANEG_STATE_AN_ENABLE;
  4247. break;
  4248. }
  4249. delta = ap->cur_time - ap->link_time;
  4250. if (delta > ANEG_STATE_SETTLE_TIME) {
  4251. /* XXX another gem from the Broadcom driver :( */
  4252. ap->state = ANEG_STATE_LINK_OK;
  4253. }
  4254. break;
  4255. case ANEG_STATE_LINK_OK:
  4256. ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
  4257. ret = ANEG_DONE;
  4258. break;
  4259. case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
  4260. /* ??? unimplemented */
  4261. break;
  4262. case ANEG_STATE_NEXT_PAGE_WAIT:
  4263. /* ??? unimplemented */
  4264. break;
  4265. default:
  4266. ret = ANEG_FAILED;
  4267. break;
  4268. }
  4269. return ret;
  4270. }
  4271. static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
  4272. {
  4273. int res = 0;
  4274. struct tg3_fiber_aneginfo aninfo;
  4275. int status = ANEG_FAILED;
  4276. unsigned int tick;
  4277. u32 tmp;
  4278. tw32_f(MAC_TX_AUTO_NEG, 0);
  4279. tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
  4280. tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
  4281. udelay(40);
  4282. tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
  4283. udelay(40);
  4284. memset(&aninfo, 0, sizeof(aninfo));
  4285. aninfo.flags |= MR_AN_ENABLE;
  4286. aninfo.state = ANEG_STATE_UNKNOWN;
  4287. aninfo.cur_time = 0;
  4288. tick = 0;
  4289. while (++tick < 195000) {
  4290. status = tg3_fiber_aneg_smachine(tp, &aninfo);
  4291. if (status == ANEG_DONE || status == ANEG_FAILED)
  4292. break;
  4293. udelay(1);
  4294. }
  4295. tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
  4296. tw32_f(MAC_MODE, tp->mac_mode);
  4297. udelay(40);
  4298. *txflags = aninfo.txconfig;
  4299. *rxflags = aninfo.flags;
  4300. if (status == ANEG_DONE &&
  4301. (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
  4302. MR_LP_ADV_FULL_DUPLEX)))
  4303. res = 1;
  4304. return res;
  4305. }
  4306. static void tg3_init_bcm8002(struct tg3 *tp)
  4307. {
  4308. u32 mac_status = tr32(MAC_STATUS);
  4309. int i;
  4310. /* Reset when initting first time or we have a link. */
  4311. if (tg3_flag(tp, INIT_COMPLETE) &&
  4312. !(mac_status & MAC_STATUS_PCS_SYNCED))
  4313. return;
  4314. /* Set PLL lock range. */
  4315. tg3_writephy(tp, 0x16, 0x8007);
  4316. /* SW reset */
  4317. tg3_writephy(tp, MII_BMCR, BMCR_RESET);
  4318. /* Wait for reset to complete. */
  4319. /* XXX schedule_timeout() ... */
  4320. for (i = 0; i < 500; i++)
  4321. udelay(10);
  4322. /* Config mode; select PMA/Ch 1 regs. */
  4323. tg3_writephy(tp, 0x10, 0x8411);
  4324. /* Enable auto-lock and comdet, select txclk for tx. */
  4325. tg3_writephy(tp, 0x11, 0x0a10);
  4326. tg3_writephy(tp, 0x18, 0x00a0);
  4327. tg3_writephy(tp, 0x16, 0x41ff);
  4328. /* Assert and deassert POR. */
  4329. tg3_writephy(tp, 0x13, 0x0400);
  4330. udelay(40);
  4331. tg3_writephy(tp, 0x13, 0x0000);
  4332. tg3_writephy(tp, 0x11, 0x0a50);
  4333. udelay(40);
  4334. tg3_writephy(tp, 0x11, 0x0a10);
  4335. /* Wait for signal to stabilize */
  4336. /* XXX schedule_timeout() ... */
  4337. for (i = 0; i < 15000; i++)
  4338. udelay(10);
  4339. /* Deselect the channel register so we can read the PHYID
  4340. * later.
  4341. */
  4342. tg3_writephy(tp, 0x10, 0x8011);
  4343. }
  4344. static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
  4345. {
  4346. u16 flowctrl;
  4347. u32 sg_dig_ctrl, sg_dig_status;
  4348. u32 serdes_cfg, expected_sg_dig_ctrl;
  4349. int workaround, port_a;
  4350. int current_link_up;
  4351. serdes_cfg = 0;
  4352. expected_sg_dig_ctrl = 0;
  4353. workaround = 0;
  4354. port_a = 1;
  4355. current_link_up = 0;
  4356. if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
  4357. tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
  4358. workaround = 1;
  4359. if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
  4360. port_a = 0;
  4361. /* preserve bits 0-11,13,14 for signal pre-emphasis */
  4362. /* preserve bits 20-23 for voltage regulator */
  4363. serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
  4364. }
  4365. sg_dig_ctrl = tr32(SG_DIG_CTRL);
  4366. if (tp->link_config.autoneg != AUTONEG_ENABLE) {
  4367. if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
  4368. if (workaround) {
  4369. u32 val = serdes_cfg;
  4370. if (port_a)
  4371. val |= 0xc010000;
  4372. else
  4373. val |= 0x4010000;
  4374. tw32_f(MAC_SERDES_CFG, val);
  4375. }
  4376. tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
  4377. }
  4378. if (mac_status & MAC_STATUS_PCS_SYNCED) {
  4379. tg3_setup_flow_control(tp, 0, 0);
  4380. current_link_up = 1;
  4381. }
  4382. goto out;
  4383. }
  4384. /* Want auto-negotiation. */
  4385. expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
  4386. flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
  4387. if (flowctrl & ADVERTISE_1000XPAUSE)
  4388. expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
  4389. if (flowctrl & ADVERTISE_1000XPSE_ASYM)
  4390. expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
  4391. if (sg_dig_ctrl != expected_sg_dig_ctrl) {
  4392. if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
  4393. tp->serdes_counter &&
  4394. ((mac_status & (MAC_STATUS_PCS_SYNCED |
  4395. MAC_STATUS_RCVD_CFG)) ==
  4396. MAC_STATUS_PCS_SYNCED)) {
  4397. tp->serdes_counter--;
  4398. current_link_up = 1;
  4399. goto out;
  4400. }
  4401. restart_autoneg:
  4402. if (workaround)
  4403. tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
  4404. tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
  4405. udelay(5);
  4406. tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
  4407. tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
  4408. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  4409. } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
  4410. MAC_STATUS_SIGNAL_DET)) {
  4411. sg_dig_status = tr32(SG_DIG_STATUS);
  4412. mac_status = tr32(MAC_STATUS);
  4413. if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
  4414. (mac_status & MAC_STATUS_PCS_SYNCED)) {
  4415. u32 local_adv = 0, remote_adv = 0;
  4416. if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
  4417. local_adv |= ADVERTISE_1000XPAUSE;
  4418. if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
  4419. local_adv |= ADVERTISE_1000XPSE_ASYM;
  4420. if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
  4421. remote_adv |= LPA_1000XPAUSE;
  4422. if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
  4423. remote_adv |= LPA_1000XPAUSE_ASYM;
  4424. tp->link_config.rmt_adv =
  4425. mii_adv_to_ethtool_adv_x(remote_adv);
  4426. tg3_setup_flow_control(tp, local_adv, remote_adv);
  4427. current_link_up = 1;
  4428. tp->serdes_counter = 0;
  4429. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  4430. } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
  4431. if (tp->serdes_counter)
  4432. tp->serdes_counter--;
  4433. else {
  4434. if (workaround) {
  4435. u32 val = serdes_cfg;
  4436. if (port_a)
  4437. val |= 0xc010000;
  4438. else
  4439. val |= 0x4010000;
  4440. tw32_f(MAC_SERDES_CFG, val);
  4441. }
  4442. tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
  4443. udelay(40);
4444. /* Link parallel detection: the link is up
4445. * only if we have PCS_SYNC and are not
4446. * receiving config code words. */
  4447. mac_status = tr32(MAC_STATUS);
  4448. if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
  4449. !(mac_status & MAC_STATUS_RCVD_CFG)) {
  4450. tg3_setup_flow_control(tp, 0, 0);
  4451. current_link_up = 1;
  4452. tp->phy_flags |=
  4453. TG3_PHYFLG_PARALLEL_DETECT;
  4454. tp->serdes_counter =
  4455. SERDES_PARALLEL_DET_TIMEOUT;
  4456. } else
  4457. goto restart_autoneg;
  4458. }
  4459. }
  4460. } else {
  4461. tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
  4462. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  4463. }
  4464. out:
  4465. return current_link_up;
  4466. }
  4467. static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
  4468. {
  4469. int current_link_up = 0;
  4470. if (!(mac_status & MAC_STATUS_PCS_SYNCED))
  4471. goto out;
  4472. if (tp->link_config.autoneg == AUTONEG_ENABLE) {
  4473. u32 txflags, rxflags;
  4474. int i;
  4475. if (fiber_autoneg(tp, &txflags, &rxflags)) {
  4476. u32 local_adv = 0, remote_adv = 0;
  4477. if (txflags & ANEG_CFG_PS1)
  4478. local_adv |= ADVERTISE_1000XPAUSE;
  4479. if (txflags & ANEG_CFG_PS2)
  4480. local_adv |= ADVERTISE_1000XPSE_ASYM;
  4481. if (rxflags & MR_LP_ADV_SYM_PAUSE)
  4482. remote_adv |= LPA_1000XPAUSE;
  4483. if (rxflags & MR_LP_ADV_ASYM_PAUSE)
  4484. remote_adv |= LPA_1000XPAUSE_ASYM;
  4485. tp->link_config.rmt_adv =
  4486. mii_adv_to_ethtool_adv_x(remote_adv);
  4487. tg3_setup_flow_control(tp, local_adv, remote_adv);
  4488. current_link_up = 1;
  4489. }
  4490. for (i = 0; i < 30; i++) {
  4491. udelay(20);
  4492. tw32_f(MAC_STATUS,
  4493. (MAC_STATUS_SYNC_CHANGED |
  4494. MAC_STATUS_CFG_CHANGED));
  4495. udelay(40);
  4496. if ((tr32(MAC_STATUS) &
  4497. (MAC_STATUS_SYNC_CHANGED |
  4498. MAC_STATUS_CFG_CHANGED)) == 0)
  4499. break;
  4500. }
  4501. mac_status = tr32(MAC_STATUS);
  4502. if (current_link_up == 0 &&
  4503. (mac_status & MAC_STATUS_PCS_SYNCED) &&
  4504. !(mac_status & MAC_STATUS_RCVD_CFG))
  4505. current_link_up = 1;
  4506. } else {
  4507. tg3_setup_flow_control(tp, 0, 0);
  4508. /* Forcing 1000FD link up. */
  4509. current_link_up = 1;
  4510. tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
  4511. udelay(40);
  4512. tw32_f(MAC_MODE, tp->mac_mode);
  4513. udelay(40);
  4514. }
  4515. out:
  4516. return current_link_up;
  4517. }
  4518. static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
  4519. {
  4520. u32 orig_pause_cfg;
  4521. u16 orig_active_speed;
  4522. u8 orig_active_duplex;
  4523. u32 mac_status;
  4524. int current_link_up;
  4525. int i;
  4526. orig_pause_cfg = tp->link_config.active_flowctrl;
  4527. orig_active_speed = tp->link_config.active_speed;
  4528. orig_active_duplex = tp->link_config.active_duplex;
  4529. if (!tg3_flag(tp, HW_AUTONEG) &&
  4530. tp->link_up &&
  4531. tg3_flag(tp, INIT_COMPLETE)) {
  4532. mac_status = tr32(MAC_STATUS);
  4533. mac_status &= (MAC_STATUS_PCS_SYNCED |
  4534. MAC_STATUS_SIGNAL_DET |
  4535. MAC_STATUS_CFG_CHANGED |
  4536. MAC_STATUS_RCVD_CFG);
  4537. if (mac_status == (MAC_STATUS_PCS_SYNCED |
  4538. MAC_STATUS_SIGNAL_DET)) {
  4539. tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
  4540. MAC_STATUS_CFG_CHANGED));
  4541. return 0;
  4542. }
  4543. }
  4544. tw32_f(MAC_TX_AUTO_NEG, 0);
  4545. tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
  4546. tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
  4547. tw32_f(MAC_MODE, tp->mac_mode);
  4548. udelay(40);
  4549. if (tp->phy_id == TG3_PHY_ID_BCM8002)
  4550. tg3_init_bcm8002(tp);
  4551. /* Enable link change event even when serdes polling. */
  4552. tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
  4553. udelay(40);
  4554. current_link_up = 0;
  4555. tp->link_config.rmt_adv = 0;
  4556. mac_status = tr32(MAC_STATUS);
  4557. if (tg3_flag(tp, HW_AUTONEG))
  4558. current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
  4559. else
  4560. current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
  4561. tp->napi[0].hw_status->status =
  4562. (SD_STATUS_UPDATED |
  4563. (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
  4564. for (i = 0; i < 100; i++) {
  4565. tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
  4566. MAC_STATUS_CFG_CHANGED));
  4567. udelay(5);
  4568. if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
  4569. MAC_STATUS_CFG_CHANGED |
  4570. MAC_STATUS_LNKSTATE_CHANGED)) == 0)
  4571. break;
  4572. }
  4573. mac_status = tr32(MAC_STATUS);
  4574. if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
  4575. current_link_up = 0;
  4576. if (tp->link_config.autoneg == AUTONEG_ENABLE &&
  4577. tp->serdes_counter == 0) {
  4578. tw32_f(MAC_MODE, (tp->mac_mode |
  4579. MAC_MODE_SEND_CONFIGS));
  4580. udelay(1);
  4581. tw32_f(MAC_MODE, tp->mac_mode);
  4582. }
  4583. }
  4584. if (current_link_up == 1) {
  4585. tp->link_config.active_speed = SPEED_1000;
  4586. tp->link_config.active_duplex = DUPLEX_FULL;
  4587. tw32(MAC_LED_CTRL, (tp->led_ctrl |
  4588. LED_CTRL_LNKLED_OVERRIDE |
  4589. LED_CTRL_1000MBPS_ON));
  4590. } else {
  4591. tp->link_config.active_speed = SPEED_UNKNOWN;
  4592. tp->link_config.active_duplex = DUPLEX_UNKNOWN;
  4593. tw32(MAC_LED_CTRL, (tp->led_ctrl |
  4594. LED_CTRL_LNKLED_OVERRIDE |
  4595. LED_CTRL_TRAFFIC_OVERRIDE));
  4596. }
  4597. if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
  4598. u32 now_pause_cfg = tp->link_config.active_flowctrl;
  4599. if (orig_pause_cfg != now_pause_cfg ||
  4600. orig_active_speed != tp->link_config.active_speed ||
  4601. orig_active_duplex != tp->link_config.active_duplex)
  4602. tg3_link_report(tp);
  4603. }
  4604. return 0;
  4605. }
  4606. static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
  4607. {
  4608. int current_link_up = 0, err = 0;
  4609. u32 bmsr, bmcr;
  4610. u16 current_speed = SPEED_UNKNOWN;
  4611. u8 current_duplex = DUPLEX_UNKNOWN;
  4612. u32 local_adv, remote_adv, sgsr;
  4613. if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
  4614. tg3_asic_rev(tp) == ASIC_REV_5720) &&
  4615. !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
  4616. (sgsr & SERDES_TG3_SGMII_MODE)) {
  4617. if (force_reset)
  4618. tg3_phy_reset(tp);
  4619. tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
  4620. if (!(sgsr & SERDES_TG3_LINK_UP)) {
  4621. tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
  4622. } else {
  4623. current_link_up = 1;
  4624. if (sgsr & SERDES_TG3_SPEED_1000) {
  4625. current_speed = SPEED_1000;
  4626. tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
  4627. } else if (sgsr & SERDES_TG3_SPEED_100) {
  4628. current_speed = SPEED_100;
  4629. tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
  4630. } else {
  4631. current_speed = SPEED_10;
  4632. tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
  4633. }
  4634. if (sgsr & SERDES_TG3_FULL_DUPLEX)
  4635. current_duplex = DUPLEX_FULL;
  4636. else
  4637. current_duplex = DUPLEX_HALF;
  4638. }
  4639. tw32_f(MAC_MODE, tp->mac_mode);
  4640. udelay(40);
  4641. tg3_clear_mac_status(tp);
  4642. goto fiber_setup_done;
  4643. }
  4644. tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
  4645. tw32_f(MAC_MODE, tp->mac_mode);
  4646. udelay(40);
  4647. tg3_clear_mac_status(tp);
  4648. if (force_reset)
  4649. tg3_phy_reset(tp);
  4650. tp->link_config.rmt_adv = 0;
  4651. err |= tg3_readphy(tp, MII_BMSR, &bmsr);
  4652. err |= tg3_readphy(tp, MII_BMSR, &bmsr);
  4653. if (tg3_asic_rev(tp) == ASIC_REV_5714) {
  4654. if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
  4655. bmsr |= BMSR_LSTATUS;
  4656. else
  4657. bmsr &= ~BMSR_LSTATUS;
  4658. }
  4659. err |= tg3_readphy(tp, MII_BMCR, &bmcr);
  4660. if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
  4661. (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
  4662. /* do nothing, just check for link up at the end */
  4663. } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
  4664. u32 adv, newadv;
  4665. err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
  4666. newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
  4667. ADVERTISE_1000XPAUSE |
  4668. ADVERTISE_1000XPSE_ASYM |
  4669. ADVERTISE_SLCT);
  4670. newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
  4671. newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
  4672. if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
  4673. tg3_writephy(tp, MII_ADVERTISE, newadv);
  4674. bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
  4675. tg3_writephy(tp, MII_BMCR, bmcr);
  4676. tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
  4677. tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
  4678. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  4679. return err;
  4680. }
  4681. } else {
  4682. u32 new_bmcr;
  4683. bmcr &= ~BMCR_SPEED1000;
  4684. new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
  4685. if (tp->link_config.duplex == DUPLEX_FULL)
  4686. new_bmcr |= BMCR_FULLDPLX;
  4687. if (new_bmcr != bmcr) {
  4688. /* BMCR_SPEED1000 is a reserved bit that needs
  4689. * to be set on write.
  4690. */
  4691. new_bmcr |= BMCR_SPEED1000;
  4692. /* Force a linkdown */
  4693. if (tp->link_up) {
  4694. u32 adv;
  4695. err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
  4696. adv &= ~(ADVERTISE_1000XFULL |
  4697. ADVERTISE_1000XHALF |
  4698. ADVERTISE_SLCT);
  4699. tg3_writephy(tp, MII_ADVERTISE, adv);
  4700. tg3_writephy(tp, MII_BMCR, bmcr |
  4701. BMCR_ANRESTART |
  4702. BMCR_ANENABLE);
  4703. udelay(10);
  4704. tg3_carrier_off(tp);
  4705. }
  4706. tg3_writephy(tp, MII_BMCR, new_bmcr);
  4707. bmcr = new_bmcr;
  4708. err |= tg3_readphy(tp, MII_BMSR, &bmsr);
  4709. err |= tg3_readphy(tp, MII_BMSR, &bmsr);
  4710. if (tg3_asic_rev(tp) == ASIC_REV_5714) {
  4711. if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
  4712. bmsr |= BMSR_LSTATUS;
  4713. else
  4714. bmsr &= ~BMSR_LSTATUS;
  4715. }
  4716. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  4717. }
  4718. }
  4719. if (bmsr & BMSR_LSTATUS) {
  4720. current_speed = SPEED_1000;
  4721. current_link_up = 1;
  4722. if (bmcr & BMCR_FULLDPLX)
  4723. current_duplex = DUPLEX_FULL;
  4724. else
  4725. current_duplex = DUPLEX_HALF;
  4726. local_adv = 0;
  4727. remote_adv = 0;
  4728. if (bmcr & BMCR_ANENABLE) {
  4729. u32 common;
  4730. err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
  4731. err |= tg3_readphy(tp, MII_LPA, &remote_adv);
  4732. common = local_adv & remote_adv;
  4733. if (common & (ADVERTISE_1000XHALF |
  4734. ADVERTISE_1000XFULL)) {
  4735. if (common & ADVERTISE_1000XFULL)
  4736. current_duplex = DUPLEX_FULL;
  4737. else
  4738. current_duplex = DUPLEX_HALF;
  4739. tp->link_config.rmt_adv =
  4740. mii_adv_to_ethtool_adv_x(remote_adv);
  4741. } else if (!tg3_flag(tp, 5780_CLASS)) {
  4742. /* Link is up via parallel detect */
  4743. } else {
  4744. current_link_up = 0;
  4745. }
  4746. }
  4747. }
  4748. fiber_setup_done:
  4749. if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
  4750. tg3_setup_flow_control(tp, local_adv, remote_adv);
  4751. tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
  4752. if (tp->link_config.active_duplex == DUPLEX_HALF)
  4753. tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
  4754. tw32_f(MAC_MODE, tp->mac_mode);
  4755. udelay(40);
  4756. tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
  4757. tp->link_config.active_speed = current_speed;
  4758. tp->link_config.active_duplex = current_duplex;
  4759. tg3_test_and_report_link_chg(tp, current_link_up);
  4760. return err;
  4761. }
  4762. static void tg3_serdes_parallel_detect(struct tg3 *tp)
  4763. {
  4764. if (tp->serdes_counter) {
  4765. /* Give autoneg time to complete. */
  4766. tp->serdes_counter--;
  4767. return;
  4768. }
  4769. if (!tp->link_up &&
  4770. (tp->link_config.autoneg == AUTONEG_ENABLE)) {
  4771. u32 bmcr;
  4772. tg3_readphy(tp, MII_BMCR, &bmcr);
  4773. if (bmcr & BMCR_ANENABLE) {
  4774. u32 phy1, phy2;
  4775. /* Select shadow register 0x1f */
  4776. tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
  4777. tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
  4778. /* Select expansion interrupt status register */
  4779. tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
  4780. MII_TG3_DSP_EXP1_INT_STAT);
  4781. tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
  4782. tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
  4783. if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4784. /* We have signal detect and are not receiving
4785. * config code words, so the link is up by parallel
4786. * detection.
  4787. */
  4788. bmcr &= ~BMCR_ANENABLE;
  4789. bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
  4790. tg3_writephy(tp, MII_BMCR, bmcr);
  4791. tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
  4792. }
  4793. }
  4794. } else if (tp->link_up &&
  4795. (tp->link_config.autoneg == AUTONEG_ENABLE) &&
  4796. (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
  4797. u32 phy2;
  4798. /* Select expansion interrupt status register */
  4799. tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
  4800. MII_TG3_DSP_EXP1_INT_STAT);
  4801. tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
  4802. if (phy2 & 0x20) {
  4803. u32 bmcr;
  4804. /* Config code words received, turn on autoneg. */
  4805. tg3_readphy(tp, MII_BMCR, &bmcr);
  4806. tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
  4807. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  4808. }
  4809. }
  4810. }
  4811. static int tg3_setup_phy(struct tg3 *tp, int force_reset)
  4812. {
  4813. u32 val;
  4814. int err;
  4815. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
  4816. err = tg3_setup_fiber_phy(tp, force_reset);
  4817. else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
  4818. err = tg3_setup_fiber_mii_phy(tp, force_reset);
  4819. else
  4820. err = tg3_setup_copper_phy(tp, force_reset);
  4821. if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
  4822. u32 scale;
  4823. val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
  4824. if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
  4825. scale = 65;
  4826. else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
  4827. scale = 6;
  4828. else
  4829. scale = 12;
  4830. val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
  4831. val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
  4832. tw32(GRC_MISC_CFG, val);
  4833. }
  4834. val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  4835. (6 << TX_LENGTHS_IPG_SHIFT);
  4836. if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
  4837. tg3_asic_rev(tp) == ASIC_REV_5762)
  4838. val |= tr32(MAC_TX_LENGTHS) &
  4839. (TX_LENGTHS_JMB_FRM_LEN_MSK |
  4840. TX_LENGTHS_CNT_DWN_VAL_MSK);
  4841. if (tp->link_config.active_speed == SPEED_1000 &&
  4842. tp->link_config.active_duplex == DUPLEX_HALF)
  4843. tw32(MAC_TX_LENGTHS, val |
  4844. (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
  4845. else
  4846. tw32(MAC_TX_LENGTHS, val |
  4847. (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
  4848. if (!tg3_flag(tp, 5705_PLUS)) {
  4849. if (tp->link_up) {
  4850. tw32(HOSTCC_STAT_COAL_TICKS,
  4851. tp->coal.stats_block_coalesce_usecs);
  4852. } else {
  4853. tw32(HOSTCC_STAT_COAL_TICKS, 0);
  4854. }
  4855. }
  4856. if (tg3_flag(tp, ASPM_WORKAROUND)) {
  4857. val = tr32(PCIE_PWR_MGMT_THRESH);
  4858. if (!tp->link_up)
  4859. val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
  4860. tp->pwrmgmt_thresh;
  4861. else
  4862. val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
  4863. tw32(PCIE_PWR_MGMT_THRESH, val);
  4864. }
  4865. return err;
  4866. }
  4867. /* tp->lock must be held */
  4868. static u64 tg3_refclk_read(struct tg3 *tp)
  4869. {
  4870. u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
  4871. return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
  4872. }
  4873. /* tp->lock must be held */
  4874. static void tg3_refclk_write(struct tg3 *tp, u64 newval)
  4875. {
  4876. tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
  4877. tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
  4878. tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
  4879. tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
  4880. }
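/* The sequence above stops the reference clock
 * (TG3_EAV_REF_CLCK_CTL_STOP) before loading the two 32-bit halves and
 * resumes it afterwards, presumably so the counter does not tick
 * between the LSB and MSB writes. Callers such as tg3_ptp_settime()
 * and tg3_ptp_init() below pass the new value as a nanosecond count.
 */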
  4881. static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
  4882. static inline void tg3_full_unlock(struct tg3 *tp);
  4883. static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
  4884. {
  4885. struct tg3 *tp = netdev_priv(dev);
  4886. info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
  4887. SOF_TIMESTAMPING_RX_SOFTWARE |
  4888. SOF_TIMESTAMPING_SOFTWARE |
  4889. SOF_TIMESTAMPING_TX_HARDWARE |
  4890. SOF_TIMESTAMPING_RX_HARDWARE |
  4891. SOF_TIMESTAMPING_RAW_HARDWARE;
  4892. if (tp->ptp_clock)
  4893. info->phc_index = ptp_clock_index(tp->ptp_clock);
  4894. else
  4895. info->phc_index = -1;
  4896. info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
  4897. info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
  4898. (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
  4899. (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
  4900. (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
  4901. return 0;
  4902. }
  4903. static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
  4904. {
  4905. struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
  4906. bool neg_adj = false;
  4907. u32 correction = 0;
  4908. if (ppb < 0) {
  4909. neg_adj = true;
  4910. ppb = -ppb;
  4911. }
  4912. /* Frequency adjustment is performed using hardware with a 24 bit
  4913. * accumulator and a programmable correction value. On each clk, the
  4914. * correction value gets added to the accumulator and when it
  4915. * overflows, the time counter is incremented/decremented.
  4916. *
  4917. * So conversion from ppb to correction value is
  4918. * ppb * (1 << 24) / 1000000000
  4919. */
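/* Worked example (illustrative numbers): a requested adjustment of
 * ppb = 1000000 (1000 ppm) gives
 *     correction = 1000000 * 16777216 / 1000000000 = 16777,
 * well within the field selected by TG3_EAV_REF_CLK_CORRECT_MASK.
 */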
  4920. correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
  4921. TG3_EAV_REF_CLK_CORRECT_MASK;
  4922. tg3_full_lock(tp, 0);
  4923. if (correction)
  4924. tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
  4925. TG3_EAV_REF_CLK_CORRECT_EN |
  4926. (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
  4927. else
  4928. tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
  4929. tg3_full_unlock(tp);
  4930. return 0;
  4931. }
  4932. static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  4933. {
  4934. struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
  4935. tg3_full_lock(tp, 0);
  4936. tp->ptp_adjust += delta;
  4937. tg3_full_unlock(tp);
  4938. return 0;
  4939. }
  4940. static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
  4941. {
  4942. u64 ns;
  4943. u32 remainder;
  4944. struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
  4945. tg3_full_lock(tp, 0);
  4946. ns = tg3_refclk_read(tp);
  4947. ns += tp->ptp_adjust;
  4948. tg3_full_unlock(tp);
  4949. ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
  4950. ts->tv_nsec = remainder;
  4951. return 0;
  4952. }
  4953. static int tg3_ptp_settime(struct ptp_clock_info *ptp,
  4954. const struct timespec *ts)
  4955. {
  4956. u64 ns;
  4957. struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
  4958. ns = timespec_to_ns(ts);
  4959. tg3_full_lock(tp, 0);
  4960. tg3_refclk_write(tp, ns);
  4961. tp->ptp_adjust = 0;
  4962. tg3_full_unlock(tp);
  4963. return 0;
  4964. }
  4965. static int tg3_ptp_enable(struct ptp_clock_info *ptp,
  4966. struct ptp_clock_request *rq, int on)
  4967. {
  4968. return -EOPNOTSUPP;
  4969. }
  4970. static const struct ptp_clock_info tg3_ptp_caps = {
  4971. .owner = THIS_MODULE,
  4972. .name = "tg3 clock",
  4973. .max_adj = 250000000,
  4974. .n_alarm = 0,
  4975. .n_ext_ts = 0,
  4976. .n_per_out = 0,
  4977. .pps = 0,
  4978. .adjfreq = tg3_ptp_adjfreq,
  4979. .adjtime = tg3_ptp_adjtime,
  4980. .gettime = tg3_ptp_gettime,
  4981. .settime = tg3_ptp_settime,
  4982. .enable = tg3_ptp_enable,
  4983. };
  4984. static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
  4985. struct skb_shared_hwtstamps *timestamp)
  4986. {
  4987. memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
  4988. timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
  4989. tp->ptp_adjust);
  4990. }
  4991. /* tp->lock must be held */
  4992. static void tg3_ptp_init(struct tg3 *tp)
  4993. {
  4994. if (!tg3_flag(tp, PTP_CAPABLE))
  4995. return;
  4996. /* Initialize the hardware clock to the system time. */
  4997. tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
  4998. tp->ptp_adjust = 0;
  4999. tp->ptp_info = tg3_ptp_caps;
  5000. }
  5001. /* tp->lock must be held */
  5002. static void tg3_ptp_resume(struct tg3 *tp)
  5003. {
  5004. if (!tg3_flag(tp, PTP_CAPABLE))
  5005. return;
  5006. tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
  5007. tp->ptp_adjust = 0;
  5008. }
  5009. static void tg3_ptp_fini(struct tg3 *tp)
  5010. {
  5011. if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
  5012. return;
  5013. ptp_clock_unregister(tp->ptp_clock);
  5014. tp->ptp_clock = NULL;
  5015. tp->ptp_adjust = 0;
  5016. }
  5017. static inline int tg3_irq_sync(struct tg3 *tp)
  5018. {
  5019. return tp->irq_sync;
  5020. }
  5021. static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
  5022. {
  5023. int i;
  5024. dst = (u32 *)((u8 *)dst + off);
  5025. for (i = 0; i < len; i += sizeof(u32))
  5026. *dst++ = tr32(off + i);
  5027. }
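/* Each register read by tg3_rd32_loop() lands in the snapshot buffer at
 * the same byte offset it has in the device's register space; e.g. the
 * call below in tg3_dump_legacy_regs(),
 *     tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
 * fills regs starting at byte offset MAC_MODE with 0x4f0 bytes of MAC
 * registers.
 */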
  5028. static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
  5029. {
  5030. tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
  5031. tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
  5032. tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
  5033. tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
  5034. tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
  5035. tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
  5036. tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
  5037. tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
  5038. tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
  5039. tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
  5040. tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
  5041. tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
  5042. tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
  5043. tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
  5044. tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
  5045. tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
  5046. tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
  5047. tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
  5048. tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
  5049. if (tg3_flag(tp, SUPPORT_MSIX))
  5050. tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
  5051. tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
  5052. tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
  5053. tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
  5054. tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
  5055. tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
  5056. tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
  5057. tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
  5058. tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
  5059. if (!tg3_flag(tp, 5705_PLUS)) {
  5060. tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
  5061. tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
  5062. tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
  5063. }
  5064. tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
  5065. tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
  5066. tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
  5067. tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
  5068. tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
  5069. if (tg3_flag(tp, NVRAM))
  5070. tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
  5071. }
  5072. static void tg3_dump_state(struct tg3 *tp)
  5073. {
  5074. int i;
  5075. u32 *regs;
  5076. regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
  5077. if (!regs)
  5078. return;
  5079. if (tg3_flag(tp, PCI_EXPRESS)) {
  5080. /* Read up to but not including private PCI registers */
  5081. for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
  5082. regs[i / sizeof(u32)] = tr32(i);
  5083. } else
  5084. tg3_dump_legacy_regs(tp, regs);
  5085. for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
  5086. if (!regs[i + 0] && !regs[i + 1] &&
  5087. !regs[i + 2] && !regs[i + 3])
  5088. continue;
  5089. netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
  5090. i * 4,
  5091. regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
  5092. }
  5093. kfree(regs);
  5094. for (i = 0; i < tp->irq_cnt; i++) {
  5095. struct tg3_napi *tnapi = &tp->napi[i];
  5096. /* SW status block */
  5097. netdev_err(tp->dev,
  5098. "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
  5099. i,
  5100. tnapi->hw_status->status,
  5101. tnapi->hw_status->status_tag,
  5102. tnapi->hw_status->rx_jumbo_consumer,
  5103. tnapi->hw_status->rx_consumer,
  5104. tnapi->hw_status->rx_mini_consumer,
  5105. tnapi->hw_status->idx[0].rx_producer,
  5106. tnapi->hw_status->idx[0].tx_consumer);
  5107. netdev_err(tp->dev,
  5108. "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
  5109. i,
  5110. tnapi->last_tag, tnapi->last_irq_tag,
  5111. tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
  5112. tnapi->rx_rcb_ptr,
  5113. tnapi->prodring.rx_std_prod_idx,
  5114. tnapi->prodring.rx_std_cons_idx,
  5115. tnapi->prodring.rx_jmb_prod_idx,
  5116. tnapi->prodring.rx_jmb_cons_idx);
  5117. }
  5118. }
  5119. /* This is called whenever we suspect that the system chipset is re-
  5120. * ordering the sequence of MMIO to the tx send mailbox. The symptom
  5121. * is bogus tx completions. We try to recover by setting the
  5122. * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
  5123. * in the workqueue.
  5124. */
  5125. static void tg3_tx_recover(struct tg3 *tp)
  5126. {
  5127. BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
  5128. tp->write32_tx_mbox == tg3_write_indirect_mbox);
  5129. netdev_warn(tp->dev,
  5130. "The system may be re-ordering memory-mapped I/O "
  5131. "cycles to the network device, attempting to recover. "
  5132. "Please report the problem to the driver maintainer "
  5133. "and include system chipset information.\n");
  5134. spin_lock(&tp->lock);
  5135. tg3_flag_set(tp, TX_RECOVERY_PENDING);
  5136. spin_unlock(&tp->lock);
  5137. }
  5138. static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
  5139. {
  5140. /* Tell compiler to fetch tx indices from memory. */
  5141. barrier();
  5142. return tnapi->tx_pending -
  5143. ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
  5144. }
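/* Example of the wrap-around arithmetic above (illustrative values,
 * with TG3_TX_RING_SIZE assumed to be a power of two such as 512):
 * tx_prod = 10 and tx_cons = 500 give (10 - 500) & 511 = 22 descriptors
 * still in flight, so tg3_tx_avail() returns tx_pending - 22.
 */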
  5145. /* Tigon3 never reports partial packet sends. So we do not
  5146. * need special logic to handle SKBs that have not had all
  5147. * of their frags sent yet, like SunGEM does.
  5148. */
  5149. static void tg3_tx(struct tg3_napi *tnapi)
  5150. {
  5151. struct tg3 *tp = tnapi->tp;
  5152. u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
  5153. u32 sw_idx = tnapi->tx_cons;
  5154. struct netdev_queue *txq;
  5155. int index = tnapi - tp->napi;
  5156. unsigned int pkts_compl = 0, bytes_compl = 0;
  5157. if (tg3_flag(tp, ENABLE_TSS))
  5158. index--;
  5159. txq = netdev_get_tx_queue(tp->dev, index);
  5160. while (sw_idx != hw_idx) {
  5161. struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
  5162. struct sk_buff *skb = ri->skb;
  5163. int i, tx_bug = 0;
  5164. if (unlikely(skb == NULL)) {
  5165. tg3_tx_recover(tp);
  5166. return;
  5167. }
  5168. if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
  5169. struct skb_shared_hwtstamps timestamp;
  5170. u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
  5171. hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
  5172. tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
  5173. skb_tstamp_tx(skb, &timestamp);
  5174. }
  5175. pci_unmap_single(tp->pdev,
  5176. dma_unmap_addr(ri, mapping),
  5177. skb_headlen(skb),
  5178. PCI_DMA_TODEVICE);
  5179. ri->skb = NULL;
  5180. while (ri->fragmented) {
  5181. ri->fragmented = false;
  5182. sw_idx = NEXT_TX(sw_idx);
  5183. ri = &tnapi->tx_buffers[sw_idx];
  5184. }
  5185. sw_idx = NEXT_TX(sw_idx);
  5186. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  5187. ri = &tnapi->tx_buffers[sw_idx];
  5188. if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
  5189. tx_bug = 1;
  5190. pci_unmap_page(tp->pdev,
  5191. dma_unmap_addr(ri, mapping),
  5192. skb_frag_size(&skb_shinfo(skb)->frags[i]),
  5193. PCI_DMA_TODEVICE);
  5194. while (ri->fragmented) {
  5195. ri->fragmented = false;
  5196. sw_idx = NEXT_TX(sw_idx);
  5197. ri = &tnapi->tx_buffers[sw_idx];
  5198. }
  5199. sw_idx = NEXT_TX(sw_idx);
  5200. }
  5201. pkts_compl++;
  5202. bytes_compl += skb->len;
  5203. dev_kfree_skb(skb);
  5204. if (unlikely(tx_bug)) {
  5205. tg3_tx_recover(tp);
  5206. return;
  5207. }
  5208. }
  5209. netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
  5210. tnapi->tx_cons = sw_idx;
  5211. /* Need to make the tx_cons update visible to tg3_start_xmit()
  5212. * before checking for netif_queue_stopped(). Without the
  5213. * memory barrier, there is a small possibility that tg3_start_xmit()
  5214. * will miss it and cause the queue to be stopped forever.
  5215. */
  5216. smp_mb();
  5217. if (unlikely(netif_tx_queue_stopped(txq) &&
  5218. (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
  5219. __netif_tx_lock(txq, smp_processor_id());
  5220. if (netif_tx_queue_stopped(txq) &&
  5221. (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
  5222. netif_tx_wake_queue(txq);
  5223. __netif_tx_unlock(txq);
  5224. }
  5225. }
  5226. static void tg3_frag_free(bool is_frag, void *data)
  5227. {
  5228. if (is_frag)
  5229. put_page(virt_to_head_page(data));
  5230. else
  5231. kfree(data);
  5232. }
  5233. static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
  5234. {
  5235. unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
  5236. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  5237. if (!ri->data)
  5238. return;
  5239. pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
  5240. map_sz, PCI_DMA_FROMDEVICE);
  5241. tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
  5242. ri->data = NULL;
  5243. }
  5244. /* Returns size of skb allocated or < 0 on error.
  5245. *
  5246. * We only need to fill in the address because the other members
  5247. * of the RX descriptor are invariant, see tg3_init_rings.
  5248. *
5249. * Note the purposeful asymmetry of cpu vs. chip accesses. For
  5250. * posting buffers we only dirty the first cache line of the RX
  5251. * descriptor (containing the address). Whereas for the RX status
  5252. * buffers the cpu only reads the last cacheline of the RX descriptor
  5253. * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  5254. */
  5255. static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
  5256. u32 opaque_key, u32 dest_idx_unmasked,
  5257. unsigned int *frag_size)
  5258. {
  5259. struct tg3_rx_buffer_desc *desc;
  5260. struct ring_info *map;
  5261. u8 *data;
  5262. dma_addr_t mapping;
  5263. int skb_size, data_size, dest_idx;
  5264. switch (opaque_key) {
  5265. case RXD_OPAQUE_RING_STD:
  5266. dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
  5267. desc = &tpr->rx_std[dest_idx];
  5268. map = &tpr->rx_std_buffers[dest_idx];
  5269. data_size = tp->rx_pkt_map_sz;
  5270. break;
  5271. case RXD_OPAQUE_RING_JUMBO:
  5272. dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
  5273. desc = &tpr->rx_jmb[dest_idx].std;
  5274. map = &tpr->rx_jmb_buffers[dest_idx];
  5275. data_size = TG3_RX_JMB_MAP_SZ;
  5276. break;
  5277. default:
  5278. return -EINVAL;
  5279. }
  5280. /* Do not overwrite any of the map or rp information
  5281. * until we are sure we can commit to a new buffer.
  5282. *
  5283. * Callers depend upon this behavior and assume that
  5284. * we leave everything unchanged if we fail.
  5285. */
  5286. skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
  5287. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
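/* skb_size reserves room for struct skb_shared_info so the buffer can
 * later be handed to build_skb() in tg3_rx(); buffers that fit in a
 * page come from the page-fragment allocator, larger ones from
 * kmalloc() (tg3_frag_free() mirrors these two cases on the free side).
 */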
  5288. if (skb_size <= PAGE_SIZE) {
  5289. data = netdev_alloc_frag(skb_size);
  5290. *frag_size = skb_size;
  5291. } else {
  5292. data = kmalloc(skb_size, GFP_ATOMIC);
  5293. *frag_size = 0;
  5294. }
  5295. if (!data)
  5296. return -ENOMEM;
  5297. mapping = pci_map_single(tp->pdev,
  5298. data + TG3_RX_OFFSET(tp),
  5299. data_size,
  5300. PCI_DMA_FROMDEVICE);
  5301. if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
  5302. tg3_frag_free(skb_size <= PAGE_SIZE, data);
  5303. return -EIO;
  5304. }
  5305. map->data = data;
  5306. dma_unmap_addr_set(map, mapping, mapping);
  5307. desc->addr_hi = ((u64)mapping >> 32);
  5308. desc->addr_lo = ((u64)mapping & 0xffffffff);
  5309. return data_size;
  5310. }
5311. /* We only need to copy the address over because the other
  5312. * members of the RX descriptor are invariant. See notes above
  5313. * tg3_alloc_rx_data for full details.
  5314. */
  5315. static void tg3_recycle_rx(struct tg3_napi *tnapi,
  5316. struct tg3_rx_prodring_set *dpr,
  5317. u32 opaque_key, int src_idx,
  5318. u32 dest_idx_unmasked)
  5319. {
  5320. struct tg3 *tp = tnapi->tp;
  5321. struct tg3_rx_buffer_desc *src_desc, *dest_desc;
  5322. struct ring_info *src_map, *dest_map;
  5323. struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
  5324. int dest_idx;
  5325. switch (opaque_key) {
  5326. case RXD_OPAQUE_RING_STD:
  5327. dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
  5328. dest_desc = &dpr->rx_std[dest_idx];
  5329. dest_map = &dpr->rx_std_buffers[dest_idx];
  5330. src_desc = &spr->rx_std[src_idx];
  5331. src_map = &spr->rx_std_buffers[src_idx];
  5332. break;
  5333. case RXD_OPAQUE_RING_JUMBO:
  5334. dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
  5335. dest_desc = &dpr->rx_jmb[dest_idx].std;
  5336. dest_map = &dpr->rx_jmb_buffers[dest_idx];
  5337. src_desc = &spr->rx_jmb[src_idx].std;
  5338. src_map = &spr->rx_jmb_buffers[src_idx];
  5339. break;
  5340. default:
  5341. return;
  5342. }
  5343. dest_map->data = src_map->data;
  5344. dma_unmap_addr_set(dest_map, mapping,
  5345. dma_unmap_addr(src_map, mapping));
  5346. dest_desc->addr_hi = src_desc->addr_hi;
  5347. dest_desc->addr_lo = src_desc->addr_lo;
  5348. /* Ensure that the update to the skb happens after the physical
  5349. * addresses have been transferred to the new BD location.
  5350. */
  5351. smp_wmb();
  5352. src_map->data = NULL;
  5353. }
  5354. /* The RX ring scheme is composed of multiple rings which post fresh
  5355. * buffers to the chip, and one special ring the chip uses to report
  5356. * status back to the host.
  5357. *
  5358. * The special ring reports the status of received packets to the
  5359. * host. The chip does not write into the original descriptor the
  5360. * RX buffer was obtained from. The chip simply takes the original
  5361. * descriptor as provided by the host, updates the status and length
  5362. * field, then writes this into the next status ring entry.
  5363. *
  5364. * Each ring the host uses to post buffers to the chip is described
5365. * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
  5366. * it is first placed into the on-chip ram. When the packet's length
  5367. * is known, it walks down the TG3_BDINFO entries to select the ring.
  5368. * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
  5369. * which is within the range of the new packet's length is chosen.
  5370. *
  5371. * The "separate ring for rx status" scheme may sound queer, but it makes
  5372. * sense from a cache coherency perspective. If only the host writes
  5373. * to the buffer post rings, and only the chip writes to the rx status
  5374. * rings, then cache lines never move beyond shared-modified state.
  5375. * If both the host and chip were to write into the same ring, cache line
  5376. * eviction could occur since both entities want it in an exclusive state.
  5377. */
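/* A rough sketch of the flow described above:
 *
 *   host posts buffer addresses on the std/jumbo producer rings
 *     -> chip DMAs a received packet into one of those buffers
 *     -> chip writes len/flags and the opaque cookie into the rx
 *        return (status) ring
 *     -> tg3_rx() below uses the opaque cookie to locate the original
 *        producer-ring slot, hands the data up the stack, and reposts
 *        a fresh buffer.
 */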
  5378. static int tg3_rx(struct tg3_napi *tnapi, int budget)
  5379. {
  5380. struct tg3 *tp = tnapi->tp;
  5381. u32 work_mask, rx_std_posted = 0;
  5382. u32 std_prod_idx, jmb_prod_idx;
  5383. u32 sw_idx = tnapi->rx_rcb_ptr;
  5384. u16 hw_idx;
  5385. int received;
  5386. struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
  5387. hw_idx = *(tnapi->rx_rcb_prod_idx);
  5388. /*
  5389. * We need to order the read of hw_idx and the read of
  5390. * the opaque cookie.
  5391. */
  5392. rmb();
  5393. work_mask = 0;
  5394. received = 0;
  5395. std_prod_idx = tpr->rx_std_prod_idx;
  5396. jmb_prod_idx = tpr->rx_jmb_prod_idx;
  5397. while (sw_idx != hw_idx && budget > 0) {
  5398. struct ring_info *ri;
  5399. struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
  5400. unsigned int len;
  5401. struct sk_buff *skb;
  5402. dma_addr_t dma_addr;
  5403. u32 opaque_key, desc_idx, *post_ptr;
  5404. u8 *data;
  5405. u64 tstamp = 0;
  5406. desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
  5407. opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
  5408. if (opaque_key == RXD_OPAQUE_RING_STD) {
  5409. ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
  5410. dma_addr = dma_unmap_addr(ri, mapping);
  5411. data = ri->data;
  5412. post_ptr = &std_prod_idx;
  5413. rx_std_posted++;
  5414. } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
  5415. ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
  5416. dma_addr = dma_unmap_addr(ri, mapping);
  5417. data = ri->data;
  5418. post_ptr = &jmb_prod_idx;
  5419. } else
  5420. goto next_pkt_nopost;
  5421. work_mask |= opaque_key;
  5422. if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
  5423. (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
  5424. drop_it:
  5425. tg3_recycle_rx(tnapi, tpr, opaque_key,
  5426. desc_idx, *post_ptr);
  5427. drop_it_no_recycle:
  5428. /* Other statistics kept track of by card. */
  5429. tp->rx_dropped++;
  5430. goto next_pkt;
  5431. }
  5432. prefetch(data + TG3_RX_OFFSET(tp));
  5433. len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
  5434. ETH_FCS_LEN;
  5435. if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
  5436. RXD_FLAG_PTPSTAT_PTPV1 ||
  5437. (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
  5438. RXD_FLAG_PTPSTAT_PTPV2) {
  5439. tstamp = tr32(TG3_RX_TSTAMP_LSB);
  5440. tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
  5441. }
  5442. if (len > TG3_RX_COPY_THRESH(tp)) {
  5443. int skb_size;
  5444. unsigned int frag_size;
  5445. skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
  5446. *post_ptr, &frag_size);
  5447. if (skb_size < 0)
  5448. goto drop_it;
  5449. pci_unmap_single(tp->pdev, dma_addr, skb_size,
  5450. PCI_DMA_FROMDEVICE);
  5451. skb = build_skb(data, frag_size);
  5452. if (!skb) {
  5453. tg3_frag_free(frag_size != 0, data);
  5454. goto drop_it_no_recycle;
  5455. }
  5456. skb_reserve(skb, TG3_RX_OFFSET(tp));
  5457. /* Ensure that the update to the data happens
  5458. * after the usage of the old DMA mapping.
  5459. */
  5460. smp_wmb();
  5461. ri->data = NULL;
  5462. } else {
  5463. tg3_recycle_rx(tnapi, tpr, opaque_key,
  5464. desc_idx, *post_ptr);
  5465. skb = netdev_alloc_skb(tp->dev,
  5466. len + TG3_RAW_IP_ALIGN);
  5467. if (skb == NULL)
  5468. goto drop_it_no_recycle;
  5469. skb_reserve(skb, TG3_RAW_IP_ALIGN);
  5470. pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
  5471. memcpy(skb->data,
  5472. data + TG3_RX_OFFSET(tp),
  5473. len);
  5474. pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
  5475. }
  5476. skb_put(skb, len);
  5477. if (tstamp)
  5478. tg3_hwclock_to_timestamp(tp, tstamp,
  5479. skb_hwtstamps(skb));
  5480. if ((tp->dev->features & NETIF_F_RXCSUM) &&
  5481. (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
  5482. (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
  5483. >> RXD_TCPCSUM_SHIFT) == 0xffff))
  5484. skb->ip_summed = CHECKSUM_UNNECESSARY;
  5485. else
  5486. skb_checksum_none_assert(skb);
  5487. skb->protocol = eth_type_trans(skb, tp->dev);
  5488. if (len > (tp->dev->mtu + ETH_HLEN) &&
  5489. skb->protocol != htons(ETH_P_8021Q)) {
  5490. dev_kfree_skb(skb);
  5491. goto drop_it_no_recycle;
  5492. }
  5493. if (desc->type_flags & RXD_FLAG_VLAN &&
  5494. !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
  5495. __vlan_hwaccel_put_tag(skb,
  5496. desc->err_vlan & RXD_VLAN_MASK);
  5497. napi_gro_receive(&tnapi->napi, skb);
  5498. received++;
  5499. budget--;
  5500. next_pkt:
  5501. (*post_ptr)++;
  5502. if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
  5503. tpr->rx_std_prod_idx = std_prod_idx &
  5504. tp->rx_std_ring_mask;
  5505. tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
  5506. tpr->rx_std_prod_idx);
  5507. work_mask &= ~RXD_OPAQUE_RING_STD;
  5508. rx_std_posted = 0;
  5509. }
  5510. next_pkt_nopost:
  5511. sw_idx++;
  5512. sw_idx &= tp->rx_ret_ring_mask;
  5513. /* Refresh hw_idx to see if there is new work */
  5514. if (sw_idx == hw_idx) {
  5515. hw_idx = *(tnapi->rx_rcb_prod_idx);
  5516. rmb();
  5517. }
  5518. }
  5519. /* ACK the status ring. */
  5520. tnapi->rx_rcb_ptr = sw_idx;
  5521. tw32_rx_mbox(tnapi->consmbox, sw_idx);
  5522. /* Refill RX ring(s). */
  5523. if (!tg3_flag(tp, ENABLE_RSS)) {
  5524. /* Sync BD data before updating mailbox */
  5525. wmb();
  5526. if (work_mask & RXD_OPAQUE_RING_STD) {
  5527. tpr->rx_std_prod_idx = std_prod_idx &
  5528. tp->rx_std_ring_mask;
  5529. tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
  5530. tpr->rx_std_prod_idx);
  5531. }
  5532. if (work_mask & RXD_OPAQUE_RING_JUMBO) {
  5533. tpr->rx_jmb_prod_idx = jmb_prod_idx &
  5534. tp->rx_jmb_ring_mask;
  5535. tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
  5536. tpr->rx_jmb_prod_idx);
  5537. }
  5538. mmiowb();
  5539. } else if (work_mask) {
  5540. /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
  5541. * updated before the producer indices can be updated.
  5542. */
  5543. smp_wmb();
  5544. tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
  5545. tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
  5546. if (tnapi != &tp->napi[1]) {
  5547. tp->rx_refill = true;
  5548. napi_schedule(&tp->napi[1].napi);
  5549. }
  5550. }
  5551. return received;
  5552. }
  5553. static void tg3_poll_link(struct tg3 *tp)
  5554. {
  5555. /* handle link change and other phy events */
  5556. if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
  5557. struct tg3_hw_status *sblk = tp->napi[0].hw_status;
  5558. if (sblk->status & SD_STATUS_LINK_CHG) {
  5559. sblk->status = SD_STATUS_UPDATED |
  5560. (sblk->status & ~SD_STATUS_LINK_CHG);
  5561. spin_lock(&tp->lock);
  5562. if (tg3_flag(tp, USE_PHYLIB)) {
  5563. tw32_f(MAC_STATUS,
  5564. (MAC_STATUS_SYNC_CHANGED |
  5565. MAC_STATUS_CFG_CHANGED |
  5566. MAC_STATUS_MI_COMPLETION |
  5567. MAC_STATUS_LNKSTATE_CHANGED));
  5568. udelay(40);
  5569. } else
  5570. tg3_setup_phy(tp, 0);
  5571. spin_unlock(&tp->lock);
  5572. }
  5573. }
  5574. }
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;

			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;

			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
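/* Per-vector NAPI poll handler used when the device runs with MSI-X.
 * Link events are handled only by tg3_poll() on vector 0, so this
 * handler deals purely with rx/tx completion work.
 */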
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}

			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
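/* Fill one or more tx BDs for a single DMA mapping, splitting it into
 * tp->dma_limit sized pieces when the chip has a DMA length limit.
 * Returns true if the mapping would trip one of the hardware DMA bugs,
 * in which case the caller falls back to tigon3_dma_hwbug_workaround().
 */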
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
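/* Unmap the skb attached to tx ring slot @entry along with the @last + 1
 * page fragment mappings that follow it, skipping any extra BDs that were
 * only added to split a mapping for the short-DMA workaround.
 */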
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either. Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}
	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once. This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts. Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero. This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL | __GFP_ZERO);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL | __GFP_ZERO);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL | __GFP_ZERO);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly. The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears. tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
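/* Quiesce the hardware before a chip reset: disable interrupts, clear the
 * rx/tx enable bits, stop the DMA and host coalescing blocks one by one
 * via tg3_stop_block(), and wipe the status blocks.
 */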
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;

	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {
		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
  7113. /* tp->lock is held. */
  7114. static int tg3_chip_reset(struct tg3 *tp)
  7115. {
  7116. u32 val;
  7117. void (*write_op)(struct tg3 *, u32, u32);
  7118. int i, err;
  7119. tg3_nvram_lock(tp);
  7120. tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
  7121. /* No matching tg3_nvram_unlock() after this because
  7122. * chip reset below will undo the nvram lock.
  7123. */
  7124. tp->nvram_lock_cnt = 0;
  7125. /* GRC_MISC_CFG core clock reset will clear the memory
  7126. * enable bit in PCI register 4 and the MSI enable bit
  7127. * on some chips, so we save relevant registers here.
  7128. */
  7129. tg3_save_pci_state(tp);
  7130. if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
  7131. tg3_flag(tp, 5755_PLUS))
  7132. tw32(GRC_FASTBOOT_PC, 0);
  7133. /*
  7134. * We must avoid the readl() that normally takes place.
  7135. * It locks machines, causes machine checks, and other
  7136. * fun things. So, temporarily disable the 5701
  7137. * hardware workaround, while we do the reset.
  7138. */
  7139. write_op = tp->write32;
  7140. if (write_op == tg3_write_flush_reg32)
  7141. tp->write32 = tg3_write32;
  7142. /* Prevent the irq handler from reading or writing PCI registers
  7143. * during chip reset when the memory enable bit in the PCI command
  7144. * register may be cleared. The chip does not generate interrupt
  7145. * at this time, but the irq handler may still be called due to irq
  7146. * sharing or irqpoll.
  7147. */
  7148. tg3_flag_set(tp, CHIP_RESETTING);
  7149. for (i = 0; i < tp->irq_cnt; i++) {
  7150. struct tg3_napi *tnapi = &tp->napi[i];
  7151. if (tnapi->hw_status) {
  7152. tnapi->hw_status->status = 0;
  7153. tnapi->hw_status->status_tag = 0;
  7154. }
  7155. tnapi->last_tag = 0;
  7156. tnapi->last_irq_tag = 0;
  7157. }
  7158. smp_mb();
  7159. for (i = 0; i < tp->irq_cnt; i++)
  7160. synchronize_irq(tp->napi[i].irq_vec);
  7161. if (tg3_asic_rev(tp) == ASIC_REV_57780) {
  7162. val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
  7163. tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
  7164. }
  7165. /* do the reset */
  7166. val = GRC_MISC_CFG_CORECLK_RESET;
  7167. if (tg3_flag(tp, PCI_EXPRESS)) {
  7168. /* Force PCIe 1.0a mode */
  7169. if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
  7170. !tg3_flag(tp, 57765_PLUS) &&
  7171. tr32(TG3_PCIE_PHY_TSTCTL) ==
  7172. (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
  7173. tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
  7174. if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
  7175. tw32(GRC_MISC_CFG, (1 << 29));
  7176. val |= (1 << 29);
  7177. }
  7178. }
  7179. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  7180. tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
  7181. tw32(GRC_VCPU_EXT_CTRL,
  7182. tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
  7183. }
  7184. /* Manage gphy power for all CPMU absent PCIe devices. */
  7185. if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
  7186. val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
  7187. tw32(GRC_MISC_CFG, val);
  7188. /* restore 5701 hardware bug workaround write method */
  7189. tp->write32 = write_op;
  7190. /* Unfortunately, we have to delay before the PCI read back.
  7191. * Some 575X chips even will not respond to a PCI cfg access
  7192. * when the reset command is given to the chip.
  7193. *
  7194. * How do these hardware designers expect things to work
  7195. * properly if the PCI write is posted for a long period
  7196. * of time? It is always necessary to have some method by
  7197. * which a register read back can occur to push the write
  7198. * out which does the reset.
  7199. *
7200. * For most tg3 variants the trick below has worked.
  7201. * Ho hum...
  7202. */
  7203. udelay(120);
  7204. /* Flush PCI posted writes. The normal MMIO registers
  7205. * are inaccessible at this time so this is the only
7206. * way to do this reliably (actually, this is no longer
  7207. * the case, see above). I tried to use indirect
  7208. * register read/write but this upset some 5701 variants.
  7209. */
  7210. pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
  7211. udelay(120);
  7212. if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
  7213. u16 val16;
  7214. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
  7215. int j;
  7216. u32 cfg_val;
  7217. /* Wait for link training to complete. */
  7218. for (j = 0; j < 5000; j++)
  7219. udelay(100);
  7220. pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
  7221. pci_write_config_dword(tp->pdev, 0xc4,
  7222. cfg_val | (1 << 15));
  7223. }
  7224. /* Clear the "no snoop" and "relaxed ordering" bits. */
  7225. val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
  7226. /*
  7227. * Older PCIe devices only support the 128 byte
  7228. * MPS setting. Enforce the restriction.
  7229. */
  7230. if (!tg3_flag(tp, CPMU_PRESENT))
  7231. val16 |= PCI_EXP_DEVCTL_PAYLOAD;
  7232. pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
  7233. /* Clear error status */
  7234. pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
  7235. PCI_EXP_DEVSTA_CED |
  7236. PCI_EXP_DEVSTA_NFED |
  7237. PCI_EXP_DEVSTA_FED |
  7238. PCI_EXP_DEVSTA_URD);
  7239. }
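/* For illustration: the pcie_capability_clear_word() call above clears
 * Relaxed Ordering and No Snoop in the PCIe Device Control register;
 * on pre-CPMU parts it also clears the Max_Payload_Size field, which
 * forces the 128-byte MPS encoding (value 0).  The DEVSTA write then
 * acks any correctable/non-fatal/fatal/UR error bits left over from
 * the reset by writing ones to them (they are RW1C).
 */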
  7240. tg3_restore_pci_state(tp);
  7241. tg3_flag_clear(tp, CHIP_RESETTING);
  7242. tg3_flag_clear(tp, ERROR_PROCESSED);
  7243. val = 0;
  7244. if (tg3_flag(tp, 5780_CLASS))
  7245. val = tr32(MEMARB_MODE);
  7246. tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
  7247. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
  7248. tg3_stop_fw(tp);
  7249. tw32(0x5000, 0x400);
  7250. }
  7251. if (tg3_flag(tp, IS_SSB_CORE)) {
  7252. /*
7253. * BCM4785: the Rx RISC CPU is not required, so halt it here
7254. * to avoid any fallout from its potentially defective internal
7255. * ROM.
  7256. */
  7257. tg3_stop_fw(tp);
  7258. tg3_halt_cpu(tp, RX_CPU_BASE);
  7259. }
  7260. tw32(GRC_MODE, tp->grc_mode);
  7261. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
  7262. val = tr32(0xc4);
  7263. tw32(0xc4, val | (1 << 15));
  7264. }
  7265. if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
  7266. tg3_asic_rev(tp) == ASIC_REV_5705) {
  7267. tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
  7268. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
  7269. tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
  7270. tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
  7271. }
  7272. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
  7273. tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
  7274. val = tp->mac_mode;
  7275. } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
  7276. tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
  7277. val = tp->mac_mode;
  7278. } else
  7279. val = 0;
  7280. tw32_f(MAC_MODE, val);
  7281. udelay(40);
  7282. tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
  7283. err = tg3_poll_fw(tp);
  7284. if (err)
  7285. return err;
  7286. tg3_mdio_start(tp);
  7287. if (tg3_flag(tp, PCI_EXPRESS) &&
  7288. tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
  7289. tg3_asic_rev(tp) != ASIC_REV_5785 &&
  7290. !tg3_flag(tp, 57765_PLUS)) {
  7291. val = tr32(0x7c00);
  7292. tw32(0x7c00, val | (1 << 25));
  7293. }
  7294. if (tg3_asic_rev(tp) == ASIC_REV_5720) {
  7295. val = tr32(TG3_CPMU_CLCK_ORIDE);
  7296. tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
  7297. }
  7298. /* Reprobe ASF enable state. */
  7299. tg3_flag_clear(tp, ENABLE_ASF);
  7300. tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
  7301. TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
  7302. tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
  7303. tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
  7304. if (val == NIC_SRAM_DATA_SIG_MAGIC) {
  7305. u32 nic_cfg;
  7306. tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
  7307. if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
  7308. tg3_flag_set(tp, ENABLE_ASF);
  7309. tp->last_event_jiffies = jiffies;
  7310. if (tg3_flag(tp, 5750_PLUS))
  7311. tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
  7312. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
  7313. if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
  7314. tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
  7315. if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
  7316. tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
  7317. }
  7318. }
  7319. return 0;
  7320. }
  7321. static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
  7322. static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
  7323. /* tp->lock is held. */
  7324. static int tg3_halt(struct tg3 *tp, int kind, int silent)
  7325. {
  7326. int err;
  7327. tg3_stop_fw(tp);
  7328. tg3_write_sig_pre_reset(tp, kind);
  7329. tg3_abort_hw(tp, silent);
  7330. err = tg3_chip_reset(tp);
  7331. __tg3_set_mac_addr(tp, 0);
  7332. tg3_write_sig_legacy(tp, kind);
  7333. tg3_write_sig_post_reset(tp, kind);
  7334. if (tp->hw_stats) {
  7335. /* Save the stats across chip resets... */
  7336. tg3_get_nstats(tp, &tp->net_stats_prev);
  7337. tg3_get_estats(tp, &tp->estats_prev);
  7338. /* And make sure the next sample is new data */
  7339. memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
  7340. }
  7341. if (err)
  7342. return err;
  7343. return 0;
  7344. }
  7345. static int tg3_set_mac_addr(struct net_device *dev, void *p)
  7346. {
  7347. struct tg3 *tp = netdev_priv(dev);
  7348. struct sockaddr *addr = p;
  7349. int err = 0, skip_mac_1 = 0;
  7350. if (!is_valid_ether_addr(addr->sa_data))
  7351. return -EADDRNOTAVAIL;
  7352. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  7353. if (!netif_running(dev))
  7354. return 0;
  7355. if (tg3_flag(tp, ENABLE_ASF)) {
  7356. u32 addr0_high, addr0_low, addr1_high, addr1_low;
  7357. addr0_high = tr32(MAC_ADDR_0_HIGH);
  7358. addr0_low = tr32(MAC_ADDR_0_LOW);
  7359. addr1_high = tr32(MAC_ADDR_1_HIGH);
  7360. addr1_low = tr32(MAC_ADDR_1_LOW);
  7361. /* Skip MAC addr 1 if ASF is using it. */
  7362. if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
  7363. !(addr1_high == 0 && addr1_low == 0))
  7364. skip_mac_1 = 1;
  7365. }
  7366. spin_lock_bh(&tp->lock);
  7367. __tg3_set_mac_addr(tp, skip_mac_1);
  7368. spin_unlock_bh(&tp->lock);
  7369. return err;
  7370. }
  7371. /* tp->lock is held. */
  7372. static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
  7373. dma_addr_t mapping, u32 maxlen_flags,
  7374. u32 nic_addr)
  7375. {
  7376. tg3_write_mem(tp,
  7377. (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
  7378. ((u64) mapping >> 32));
  7379. tg3_write_mem(tp,
  7380. (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
  7381. ((u64) mapping & 0xffffffff));
  7382. tg3_write_mem(tp,
  7383. (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
  7384. maxlen_flags);
  7385. if (!tg3_flag(tp, 5705_PLUS))
  7386. tg3_write_mem(tp,
  7387. (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
  7388. nic_addr);
  7389. }
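/* For illustration, the ring control block written above can be viewed
 * as the following layout in NIC SRAM (field names here are purely
 * descriptive, not definitions taken from this driver):
 *
 *	struct bdinfo_sketch {
 *		u32 host_addr_hi;	// TG3_BDINFO_HOST_ADDR + HIGH
 *		u32 host_addr_lo;	// TG3_BDINFO_HOST_ADDR + LOW
 *		u32 maxlen_flags;	// ring size << MAXLEN shift, OR'd with flags
 *		u32 nic_addr;		// only written on pre-5705 devices
 *	};
 */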
  7390. static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
  7391. {
  7392. int i = 0;
  7393. if (!tg3_flag(tp, ENABLE_TSS)) {
  7394. tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
  7395. tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
  7396. tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
  7397. } else {
  7398. tw32(HOSTCC_TXCOL_TICKS, 0);
  7399. tw32(HOSTCC_TXMAX_FRAMES, 0);
  7400. tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
  7401. for (; i < tp->txq_cnt; i++) {
  7402. u32 reg;
  7403. reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
  7404. tw32(reg, ec->tx_coalesce_usecs);
  7405. reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
  7406. tw32(reg, ec->tx_max_coalesced_frames);
  7407. reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
  7408. tw32(reg, ec->tx_max_coalesced_frames_irq);
  7409. }
  7410. }
  7411. for (; i < tp->irq_max - 1; i++) {
  7412. tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
  7413. tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
  7414. tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
  7415. }
  7416. }
  7417. static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
  7418. {
  7419. int i = 0;
  7420. u32 limit = tp->rxq_cnt;
  7421. if (!tg3_flag(tp, ENABLE_RSS)) {
  7422. tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
  7423. tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
  7424. tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
  7425. limit--;
  7426. } else {
  7427. tw32(HOSTCC_RXCOL_TICKS, 0);
  7428. tw32(HOSTCC_RXMAX_FRAMES, 0);
  7429. tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
  7430. }
  7431. for (; i < limit; i++) {
  7432. u32 reg;
  7433. reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
  7434. tw32(reg, ec->rx_coalesce_usecs);
  7435. reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
  7436. tw32(reg, ec->rx_max_coalesced_frames);
  7437. reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
  7438. tw32(reg, ec->rx_max_coalesced_frames_irq);
  7439. }
  7440. for (; i < tp->irq_max - 1; i++) {
  7441. tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
  7442. tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
  7443. tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
  7444. }
  7445. }
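/* Note: host coalescing parameters for vector 0 live in the base
 * HOSTCC_{TX,RX}* registers, while vectors 1..N use the *_VEC1
 * register groups spaced 0x18 bytes apart; the trailing loops zero
 * the registers of whatever vectors are not currently in use.
 */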
  7446. static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
  7447. {
  7448. tg3_coal_tx_init(tp, ec);
  7449. tg3_coal_rx_init(tp, ec);
  7450. if (!tg3_flag(tp, 5705_PLUS)) {
  7451. u32 val = ec->stats_block_coalesce_usecs;
  7452. tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
  7453. tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
  7454. if (!tp->link_up)
  7455. val = 0;
  7456. tw32(HOSTCC_STAT_COAL_TICKS, val);
  7457. }
  7458. }
  7459. /* tp->lock is held. */
  7460. static void tg3_rings_reset(struct tg3 *tp)
  7461. {
  7462. int i;
  7463. u32 stblk, txrcb, rxrcb, limit;
  7464. struct tg3_napi *tnapi = &tp->napi[0];
  7465. /* Disable all transmit rings but the first. */
  7466. if (!tg3_flag(tp, 5705_PLUS))
  7467. limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
  7468. else if (tg3_flag(tp, 5717_PLUS))
  7469. limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
  7470. else if (tg3_flag(tp, 57765_CLASS) ||
  7471. tg3_asic_rev(tp) == ASIC_REV_5762)
  7472. limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
  7473. else
  7474. limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
  7475. for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
  7476. txrcb < limit; txrcb += TG3_BDINFO_SIZE)
  7477. tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
  7478. BDINFO_FLAGS_DISABLED);
  7479. /* Disable all receive return rings but the first. */
  7480. if (tg3_flag(tp, 5717_PLUS))
  7481. limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
  7482. else if (!tg3_flag(tp, 5705_PLUS))
  7483. limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
  7484. else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
  7485. tg3_asic_rev(tp) == ASIC_REV_5762 ||
  7486. tg3_flag(tp, 57765_CLASS))
  7487. limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
  7488. else
  7489. limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
  7490. for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
  7491. rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
  7492. tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
  7493. BDINFO_FLAGS_DISABLED);
  7494. /* Disable interrupts */
  7495. tw32_mailbox_f(tp->napi[0].int_mbox, 1);
  7496. tp->napi[0].chk_msi_cnt = 0;
  7497. tp->napi[0].last_rx_cons = 0;
  7498. tp->napi[0].last_tx_cons = 0;
  7499. /* Zero mailbox registers. */
  7500. if (tg3_flag(tp, SUPPORT_MSIX)) {
  7501. for (i = 1; i < tp->irq_max; i++) {
  7502. tp->napi[i].tx_prod = 0;
  7503. tp->napi[i].tx_cons = 0;
  7504. if (tg3_flag(tp, ENABLE_TSS))
  7505. tw32_mailbox(tp->napi[i].prodmbox, 0);
  7506. tw32_rx_mbox(tp->napi[i].consmbox, 0);
  7507. tw32_mailbox_f(tp->napi[i].int_mbox, 1);
  7508. tp->napi[i].chk_msi_cnt = 0;
  7509. tp->napi[i].last_rx_cons = 0;
  7510. tp->napi[i].last_tx_cons = 0;
  7511. }
  7512. if (!tg3_flag(tp, ENABLE_TSS))
  7513. tw32_mailbox(tp->napi[0].prodmbox, 0);
  7514. } else {
  7515. tp->napi[0].tx_prod = 0;
  7516. tp->napi[0].tx_cons = 0;
  7517. tw32_mailbox(tp->napi[0].prodmbox, 0);
  7518. tw32_rx_mbox(tp->napi[0].consmbox, 0);
  7519. }
  7520. /* Make sure the NIC-based send BD rings are disabled. */
  7521. if (!tg3_flag(tp, 5705_PLUS)) {
  7522. u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
  7523. for (i = 0; i < 16; i++)
  7524. tw32_tx_mbox(mbox + i * 8, 0);
  7525. }
  7526. txrcb = NIC_SRAM_SEND_RCB;
  7527. rxrcb = NIC_SRAM_RCV_RET_RCB;
  7528. /* Clear status block in ram. */
  7529. memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
  7530. /* Set status block DMA address */
  7531. tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
  7532. ((u64) tnapi->status_mapping >> 32));
  7533. tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
  7534. ((u64) tnapi->status_mapping & 0xffffffff));
  7535. if (tnapi->tx_ring) {
  7536. tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
  7537. (TG3_TX_RING_SIZE <<
  7538. BDINFO_FLAGS_MAXLEN_SHIFT),
  7539. NIC_SRAM_TX_BUFFER_DESC);
  7540. txrcb += TG3_BDINFO_SIZE;
  7541. }
  7542. if (tnapi->rx_rcb) {
  7543. tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
  7544. (tp->rx_ret_ring_mask + 1) <<
  7545. BDINFO_FLAGS_MAXLEN_SHIFT, 0);
  7546. rxrcb += TG3_BDINFO_SIZE;
  7547. }
  7548. stblk = HOSTCC_STATBLCK_RING1;
  7549. for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
  7550. u64 mapping = (u64)tnapi->status_mapping;
  7551. tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
  7552. tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
  7553. /* Clear status block in ram. */
  7554. memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
  7555. if (tnapi->tx_ring) {
  7556. tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
  7557. (TG3_TX_RING_SIZE <<
  7558. BDINFO_FLAGS_MAXLEN_SHIFT),
  7559. NIC_SRAM_TX_BUFFER_DESC);
  7560. txrcb += TG3_BDINFO_SIZE;
  7561. }
  7562. tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
  7563. ((tp->rx_ret_ring_mask + 1) <<
  7564. BDINFO_FLAGS_MAXLEN_SHIFT), 0);
  7565. stblk += 8;
  7566. rxrcb += TG3_BDINFO_SIZE;
  7567. }
  7568. }
  7569. static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
  7570. {
  7571. u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
  7572. if (!tg3_flag(tp, 5750_PLUS) ||
  7573. tg3_flag(tp, 5780_CLASS) ||
  7574. tg3_asic_rev(tp) == ASIC_REV_5750 ||
  7575. tg3_asic_rev(tp) == ASIC_REV_5752 ||
  7576. tg3_flag(tp, 57765_PLUS))
  7577. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
  7578. else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
  7579. tg3_asic_rev(tp) == ASIC_REV_5787)
  7580. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
  7581. else
  7582. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
  7583. nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
  7584. host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
  7585. val = min(nic_rep_thresh, host_rep_thresh);
  7586. tw32(RCVBDI_STD_THRESH, val);
  7587. if (tg3_flag(tp, 57765_PLUS))
  7588. tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
  7589. if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
  7590. return;
  7591. bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
  7592. host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
  7593. val = min(bdcache_maxcnt / 2, host_rep_thresh);
  7594. tw32(RCVBDI_JUMBO_THRESH, val);
  7595. if (tg3_flag(tp, 57765_PLUS))
  7596. tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
  7597. }
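/* Rough example of the replenish threshold math above: with
 * rx_pending == 200, host_rep_thresh is max(200 / 8, 1) = 25, and
 * RCVBDI_STD_THRESH gets min(nic_rep_thresh, 25), where nic_rep_thresh
 * is itself capped at half the on-chip BD cache.  The NIC then asks
 * the host for more standard RX buffers once the ring drops below that
 * many descriptors.
 */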
  7598. static inline u32 calc_crc(unsigned char *buf, int len)
  7599. {
  7600. u32 reg;
  7601. u32 tmp;
  7602. int j, k;
  7603. reg = 0xffffffff;
  7604. for (j = 0; j < len; j++) {
  7605. reg ^= buf[j];
  7606. for (k = 0; k < 8; k++) {
  7607. tmp = reg & 0x01;
  7608. reg >>= 1;
  7609. if (tmp)
  7610. reg ^= 0xedb88320;
  7611. }
  7612. }
  7613. return ~reg;
  7614. }
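/* calc_crc() is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320, initial value 0xffffffff, final complement), i.e. the
 * same CRC used for the Ethernet FCS.  It should be equivalent to
 * ~crc32_le(~0, buf, len) from <linux/crc32.h>, but is kept open-coded
 * here; the multicast filter below only consumes the low 7 bits.
 */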
  7615. static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
  7616. {
  7617. /* accept or reject all multicast frames */
  7618. tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
  7619. tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
  7620. tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
  7621. tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
  7622. }
  7623. static void __tg3_set_rx_mode(struct net_device *dev)
  7624. {
  7625. struct tg3 *tp = netdev_priv(dev);
  7626. u32 rx_mode;
  7627. rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
  7628. RX_MODE_KEEP_VLAN_TAG);
  7629. #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
  7630. /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
  7631. * flag clear.
  7632. */
  7633. if (!tg3_flag(tp, ENABLE_ASF))
  7634. rx_mode |= RX_MODE_KEEP_VLAN_TAG;
  7635. #endif
  7636. if (dev->flags & IFF_PROMISC) {
  7637. /* Promiscuous mode. */
  7638. rx_mode |= RX_MODE_PROMISC;
  7639. } else if (dev->flags & IFF_ALLMULTI) {
  7640. /* Accept all multicast. */
  7641. tg3_set_multi(tp, 1);
  7642. } else if (netdev_mc_empty(dev)) {
  7643. /* Reject all multicast. */
  7644. tg3_set_multi(tp, 0);
  7645. } else {
  7646. /* Accept one or more multicast(s). */
  7647. struct netdev_hw_addr *ha;
  7648. u32 mc_filter[4] = { 0, };
  7649. u32 regidx;
  7650. u32 bit;
  7651. u32 crc;
  7652. netdev_for_each_mc_addr(ha, dev) {
  7653. crc = calc_crc(ha->addr, ETH_ALEN);
  7654. bit = ~crc & 0x7f;
  7655. regidx = (bit & 0x60) >> 5;
  7656. bit &= 0x1f;
  7657. mc_filter[regidx] |= (1 << bit);
  7658. }
  7659. tw32(MAC_HASH_REG_0, mc_filter[0]);
  7660. tw32(MAC_HASH_REG_1, mc_filter[1]);
  7661. tw32(MAC_HASH_REG_2, mc_filter[2]);
  7662. tw32(MAC_HASH_REG_3, mc_filter[3]);
  7663. }
  7664. if (rx_mode != tp->rx_mode) {
  7665. tp->rx_mode = rx_mode;
  7666. tw32_f(MAC_RX_MODE, rx_mode);
  7667. udelay(10);
  7668. }
  7669. }
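/* Worked example (illustrative values only): if calc_crc() over a
 * multicast address yields crc = 0x12345678, then
 *	bit    = ~crc & 0x7f;		// 0x07 -> one of 128 filter bits
 *	regidx = (bit & 0x60) >> 5;	// 0    -> MAC_HASH_REG_0
 *	bit   &= 0x1f;			// 7    -> bit 7 of that register
 * so each multicast address sets exactly one bit of the 128-bit hash
 * filter spread across MAC_HASH_REG_0..3.
 */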
  7670. static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
  7671. {
  7672. int i;
  7673. for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
  7674. tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
  7675. }
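/* Note: ethtool_rxfh_indir_default(i, qcnt) returns i % qcnt, so the
 * default indirection table spreads RSS buckets round-robin across the
 * enabled RX queues.
 */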
  7676. static void tg3_rss_check_indir_tbl(struct tg3 *tp)
  7677. {
  7678. int i;
  7679. if (!tg3_flag(tp, SUPPORT_MSIX))
  7680. return;
  7681. if (tp->rxq_cnt == 1) {
  7682. memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
  7683. return;
  7684. }
  7685. /* Validate table against current IRQ count */
  7686. for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
  7687. if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
  7688. break;
  7689. }
  7690. if (i != TG3_RSS_INDIR_TBL_SIZE)
  7691. tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
  7692. }
  7693. static void tg3_rss_write_indir_tbl(struct tg3 *tp)
  7694. {
  7695. int i = 0;
  7696. u32 reg = MAC_RSS_INDIR_TBL_0;
  7697. while (i < TG3_RSS_INDIR_TBL_SIZE) {
  7698. u32 val = tp->rss_ind_tbl[i];
  7699. i++;
  7700. for (; i % 8; i++) {
  7701. val <<= 4;
  7702. val |= tp->rss_ind_tbl[i];
  7703. }
  7704. tw32(reg, val);
  7705. reg += 4;
  7706. }
  7707. }
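/* Note: each 32-bit MAC_RSS_INDIR_TBL_* register packs eight 4-bit
 * table entries, with the first entry ending up in the most
 * significant nibble, so the table is written 8 entries per register
 * at a 4-byte register stride.
 */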
  7708. /* tp->lock is held. */
  7709. static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
  7710. {
  7711. u32 val, rdmac_mode;
  7712. int i, err, limit;
  7713. struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
  7714. tg3_disable_ints(tp);
  7715. tg3_stop_fw(tp);
  7716. tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
  7717. if (tg3_flag(tp, INIT_COMPLETE))
  7718. tg3_abort_hw(tp, 1);
  7719. /* Enable MAC control of LPI */
  7720. if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
  7721. val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
  7722. TG3_CPMU_EEE_LNKIDL_UART_IDL;
  7723. if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
  7724. val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
  7725. tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
  7726. tw32_f(TG3_CPMU_EEE_CTRL,
  7727. TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
  7728. val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
  7729. TG3_CPMU_EEEMD_LPI_IN_TX |
  7730. TG3_CPMU_EEEMD_LPI_IN_RX |
  7731. TG3_CPMU_EEEMD_EEE_ENABLE;
  7732. if (tg3_asic_rev(tp) != ASIC_REV_5717)
  7733. val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
  7734. if (tg3_flag(tp, ENABLE_APE))
  7735. val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
  7736. tw32_f(TG3_CPMU_EEE_MODE, val);
  7737. tw32_f(TG3_CPMU_EEE_DBTMR1,
  7738. TG3_CPMU_DBTMR1_PCIEXIT_2047US |
  7739. TG3_CPMU_DBTMR1_LNKIDLE_2047US);
  7740. tw32_f(TG3_CPMU_EEE_DBTMR2,
  7741. TG3_CPMU_DBTMR2_APE_TX_2047US |
  7742. TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
  7743. }
  7744. if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
  7745. !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
  7746. tg3_phy_pull_config(tp);
  7747. tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
  7748. }
  7749. if (reset_phy)
  7750. tg3_phy_reset(tp);
  7751. err = tg3_chip_reset(tp);
  7752. if (err)
  7753. return err;
  7754. tg3_write_sig_legacy(tp, RESET_KIND_INIT);
  7755. if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
  7756. val = tr32(TG3_CPMU_CTRL);
  7757. val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
  7758. tw32(TG3_CPMU_CTRL, val);
  7759. val = tr32(TG3_CPMU_LSPD_10MB_CLK);
  7760. val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
  7761. val |= CPMU_LSPD_10MB_MACCLK_6_25;
  7762. tw32(TG3_CPMU_LSPD_10MB_CLK, val);
  7763. val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
  7764. val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
  7765. val |= CPMU_LNK_AWARE_MACCLK_6_25;
  7766. tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
  7767. val = tr32(TG3_CPMU_HST_ACC);
  7768. val &= ~CPMU_HST_ACC_MACCLK_MASK;
  7769. val |= CPMU_HST_ACC_MACCLK_6_25;
  7770. tw32(TG3_CPMU_HST_ACC, val);
  7771. }
  7772. if (tg3_asic_rev(tp) == ASIC_REV_57780) {
  7773. val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
  7774. val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
  7775. PCIE_PWR_MGMT_L1_THRESH_4MS;
  7776. tw32(PCIE_PWR_MGMT_THRESH, val);
  7777. val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
  7778. tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
  7779. tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
  7780. val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
  7781. tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
  7782. }
  7783. if (tg3_flag(tp, L1PLLPD_EN)) {
  7784. u32 grc_mode = tr32(GRC_MODE);
  7785. /* Access the lower 1K of PL PCIE block registers. */
  7786. val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  7787. tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
  7788. val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
  7789. tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
  7790. val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
  7791. tw32(GRC_MODE, grc_mode);
  7792. }
  7793. if (tg3_flag(tp, 57765_CLASS)) {
  7794. if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
  7795. u32 grc_mode = tr32(GRC_MODE);
  7796. /* Access the lower 1K of PL PCIE block registers. */
  7797. val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  7798. tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
  7799. val = tr32(TG3_PCIE_TLDLPL_PORT +
  7800. TG3_PCIE_PL_LO_PHYCTL5);
  7801. tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
  7802. val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
  7803. tw32(GRC_MODE, grc_mode);
  7804. }
  7805. if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
  7806. u32 grc_mode;
  7807. /* Fix transmit hangs */
  7808. val = tr32(TG3_CPMU_PADRNG_CTL);
  7809. val |= TG3_CPMU_PADRNG_CTL_RDIV2;
  7810. tw32(TG3_CPMU_PADRNG_CTL, val);
  7811. grc_mode = tr32(GRC_MODE);
  7812. /* Access the lower 1K of DL PCIE block registers. */
  7813. val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  7814. tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
  7815. val = tr32(TG3_PCIE_TLDLPL_PORT +
  7816. TG3_PCIE_DL_LO_FTSMAX);
  7817. val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
  7818. tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
  7819. val | TG3_PCIE_DL_LO_FTSMAX_VAL);
  7820. tw32(GRC_MODE, grc_mode);
  7821. }
  7822. val = tr32(TG3_CPMU_LSPD_10MB_CLK);
  7823. val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
  7824. val |= CPMU_LSPD_10MB_MACCLK_6_25;
  7825. tw32(TG3_CPMU_LSPD_10MB_CLK, val);
  7826. }
  7827. /* This works around an issue with Athlon chipsets on
  7828. * B3 tigon3 silicon. This bit has no effect on any
  7829. * other revision. But do not set this on PCI Express
  7830. * chips and don't even touch the clocks if the CPMU is present.
  7831. */
  7832. if (!tg3_flag(tp, CPMU_PRESENT)) {
  7833. if (!tg3_flag(tp, PCI_EXPRESS))
  7834. tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
  7835. tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
  7836. }
  7837. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
  7838. tg3_flag(tp, PCIX_MODE)) {
  7839. val = tr32(TG3PCI_PCISTATE);
  7840. val |= PCISTATE_RETRY_SAME_DMA;
  7841. tw32(TG3PCI_PCISTATE, val);
  7842. }
  7843. if (tg3_flag(tp, ENABLE_APE)) {
  7844. /* Allow reads and writes to the
  7845. * APE register and memory space.
  7846. */
  7847. val = tr32(TG3PCI_PCISTATE);
  7848. val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
  7849. PCISTATE_ALLOW_APE_SHMEM_WR |
  7850. PCISTATE_ALLOW_APE_PSPACE_WR;
  7851. tw32(TG3PCI_PCISTATE, val);
  7852. }
  7853. if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
  7854. /* Enable some hw fixes. */
  7855. val = tr32(TG3PCI_MSI_DATA);
  7856. val |= (1 << 26) | (1 << 28) | (1 << 29);
  7857. tw32(TG3PCI_MSI_DATA, val);
  7858. }
  7859. /* Descriptor ring init may make accesses to the
7860. * NIC SRAM area to set up the TX descriptors, so we
  7861. * can only do this after the hardware has been
  7862. * successfully reset.
  7863. */
  7864. err = tg3_init_rings(tp);
  7865. if (err)
  7866. return err;
  7867. if (tg3_flag(tp, 57765_PLUS)) {
  7868. val = tr32(TG3PCI_DMA_RW_CTRL) &
  7869. ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
  7870. if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
  7871. val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
  7872. if (!tg3_flag(tp, 57765_CLASS) &&
  7873. tg3_asic_rev(tp) != ASIC_REV_5717 &&
  7874. tg3_asic_rev(tp) != ASIC_REV_5762)
  7875. val |= DMA_RWCTRL_TAGGED_STAT_WA;
  7876. tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
  7877. } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
  7878. tg3_asic_rev(tp) != ASIC_REV_5761) {
  7879. /* This value is determined during the probe time DMA
  7880. * engine test, tg3_test_dma.
  7881. */
  7882. tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
  7883. }
  7884. tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
  7885. GRC_MODE_4X_NIC_SEND_RINGS |
  7886. GRC_MODE_NO_TX_PHDR_CSUM |
  7887. GRC_MODE_NO_RX_PHDR_CSUM);
  7888. tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
  7889. /* Pseudo-header checksum is done by hardware logic and not
7890. * the offload processors, so make the chip do the pseudo-
  7891. * header checksums on receive. For transmit it is more
  7892. * convenient to do the pseudo-header checksum in software
  7893. * as Linux does that on transmit for us in all cases.
  7894. */
  7895. tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
  7896. val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
  7897. if (tp->rxptpctl)
  7898. tw32(TG3_RX_PTP_CTL,
  7899. tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
  7900. if (tg3_flag(tp, PTP_CAPABLE))
  7901. val |= GRC_MODE_TIME_SYNC_ENABLE;
  7902. tw32(GRC_MODE, tp->grc_mode | val);
7903. /* Set up the timer prescaler register. The clock is always 66 MHz. */
  7904. val = tr32(GRC_MISC_CFG);
  7905. val &= ~0xff;
  7906. val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
  7907. tw32(GRC_MISC_CFG, val);
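/* For illustration: with a fixed 66 MHz core clock, a prescaler value
 * of 65 presumably divides by (65 + 1), giving a 1 MHz (1 usec) time
 * base for the coalescing tick counters.
 */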
  7908. /* Initialize MBUF/DESC pool. */
  7909. if (tg3_flag(tp, 5750_PLUS)) {
  7910. /* Do nothing. */
  7911. } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
  7912. tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
  7913. if (tg3_asic_rev(tp) == ASIC_REV_5704)
  7914. tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
  7915. else
  7916. tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
  7917. tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
  7918. tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
  7919. } else if (tg3_flag(tp, TSO_CAPABLE)) {
  7920. int fw_len;
  7921. fw_len = tp->fw_len;
  7922. fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
  7923. tw32(BUFMGR_MB_POOL_ADDR,
  7924. NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
  7925. tw32(BUFMGR_MB_POOL_SIZE,
  7926. NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
  7927. }
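/* Note on the rounding above: (fw_len + 0x7f) & ~0x7f rounds the
 * firmware length up to the next 128-byte boundary, e.g. a 0x1234-byte
 * image reserves 0x1280 bytes ahead of the MBUF pool base.
 */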
  7928. if (tp->dev->mtu <= ETH_DATA_LEN) {
  7929. tw32(BUFMGR_MB_RDMA_LOW_WATER,
  7930. tp->bufmgr_config.mbuf_read_dma_low_water);
  7931. tw32(BUFMGR_MB_MACRX_LOW_WATER,
  7932. tp->bufmgr_config.mbuf_mac_rx_low_water);
  7933. tw32(BUFMGR_MB_HIGH_WATER,
  7934. tp->bufmgr_config.mbuf_high_water);
  7935. } else {
  7936. tw32(BUFMGR_MB_RDMA_LOW_WATER,
  7937. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
  7938. tw32(BUFMGR_MB_MACRX_LOW_WATER,
  7939. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
  7940. tw32(BUFMGR_MB_HIGH_WATER,
  7941. tp->bufmgr_config.mbuf_high_water_jumbo);
  7942. }
  7943. tw32(BUFMGR_DMA_LOW_WATER,
  7944. tp->bufmgr_config.dma_low_water);
  7945. tw32(BUFMGR_DMA_HIGH_WATER,
  7946. tp->bufmgr_config.dma_high_water);
  7947. val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
  7948. if (tg3_asic_rev(tp) == ASIC_REV_5719)
  7949. val |= BUFMGR_MODE_NO_TX_UNDERRUN;
  7950. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  7951. tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
  7952. tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
  7953. val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
  7954. tw32(BUFMGR_MODE, val);
  7955. for (i = 0; i < 2000; i++) {
  7956. if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
  7957. break;
  7958. udelay(10);
  7959. }
  7960. if (i >= 2000) {
  7961. netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
  7962. return -ENODEV;
  7963. }
  7964. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
  7965. tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
  7966. tg3_setup_rxbd_thresholds(tp);
  7967. /* Initialize TG3_BDINFO's at:
  7968. * RCVDBDI_STD_BD: standard eth size rx ring
  7969. * RCVDBDI_JUMBO_BD: jumbo frame rx ring
  7970. * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
  7971. *
  7972. * like so:
  7973. * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
  7974. * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
  7975. * ring attribute flags
  7976. * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
  7977. *
  7978. * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
  7979. * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
  7980. *
  7981. * The size of each ring is fixed in the firmware, but the location is
  7982. * configurable.
  7983. */
  7984. tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
  7985. ((u64) tpr->rx_std_mapping >> 32));
  7986. tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
  7987. ((u64) tpr->rx_std_mapping & 0xffffffff));
  7988. if (!tg3_flag(tp, 5717_PLUS))
  7989. tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
  7990. NIC_SRAM_RX_BUFFER_DESC);
  7991. /* Disable the mini ring */
  7992. if (!tg3_flag(tp, 5705_PLUS))
  7993. tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
  7994. BDINFO_FLAGS_DISABLED);
  7995. /* Program the jumbo buffer descriptor ring control
  7996. * blocks on those devices that have them.
  7997. */
  7998. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
  7999. (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
  8000. if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
  8001. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
  8002. ((u64) tpr->rx_jmb_mapping >> 32));
  8003. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
  8004. ((u64) tpr->rx_jmb_mapping & 0xffffffff));
  8005. val = TG3_RX_JMB_RING_SIZE(tp) <<
  8006. BDINFO_FLAGS_MAXLEN_SHIFT;
  8007. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
  8008. val | BDINFO_FLAGS_USE_EXT_RECV);
  8009. if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
  8010. tg3_flag(tp, 57765_CLASS) ||
  8011. tg3_asic_rev(tp) == ASIC_REV_5762)
  8012. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
  8013. NIC_SRAM_RX_JUMBO_BUFFER_DESC);
  8014. } else {
  8015. tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
  8016. BDINFO_FLAGS_DISABLED);
  8017. }
  8018. if (tg3_flag(tp, 57765_PLUS)) {
  8019. val = TG3_RX_STD_RING_SIZE(tp);
  8020. val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
  8021. val |= (TG3_RX_STD_DMA_SZ << 2);
  8022. } else
  8023. val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
  8024. } else
  8025. val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
  8026. tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
  8027. tpr->rx_std_prod_idx = tp->rx_pending;
  8028. tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
  8029. tpr->rx_jmb_prod_idx =
  8030. tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
  8031. tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
  8032. tg3_rings_reset(tp);
  8033. /* Initialize MAC address and backoff seed. */
  8034. __tg3_set_mac_addr(tp, 0);
  8035. /* MTU + ethernet header + FCS + optional VLAN tag */
  8036. tw32(MAC_RX_MTU_SIZE,
  8037. tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
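/* For a standard 1500-byte MTU this programs 1500 + 14 (Ethernet
 * header) + 4 (FCS) + 4 (optional VLAN tag) = 1522 bytes as the
 * largest frame the MAC will accept.
 */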
  8038. /* The slot time is changed by tg3_setup_phy if we
  8039. * run at gigabit with half duplex.
  8040. */
  8041. val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  8042. (6 << TX_LENGTHS_IPG_SHIFT) |
  8043. (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
  8044. if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
  8045. tg3_asic_rev(tp) == ASIC_REV_5762)
  8046. val |= tr32(MAC_TX_LENGTHS) &
  8047. (TX_LENGTHS_JMB_FRM_LEN_MSK |
  8048. TX_LENGTHS_CNT_DWN_VAL_MSK);
  8049. tw32(MAC_TX_LENGTHS, val);
  8050. /* Receive rules. */
  8051. tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
  8052. tw32(RCVLPC_CONFIG, 0x0181);
8053. /* Calculate RDMAC_MODE setting early; we need it to determine
  8054. * the RCVLPC_STATE_ENABLE mask.
  8055. */
  8056. rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
  8057. RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
  8058. RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
  8059. RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
  8060. RDMAC_MODE_LNGREAD_ENAB);
  8061. if (tg3_asic_rev(tp) == ASIC_REV_5717)
  8062. rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
  8063. if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
  8064. tg3_asic_rev(tp) == ASIC_REV_5785 ||
  8065. tg3_asic_rev(tp) == ASIC_REV_57780)
  8066. rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
  8067. RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
  8068. RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
  8069. if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
  8070. tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
  8071. if (tg3_flag(tp, TSO_CAPABLE) &&
  8072. tg3_asic_rev(tp) == ASIC_REV_5705) {
  8073. rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
  8074. } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
  8075. !tg3_flag(tp, IS_5788)) {
  8076. rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
  8077. }
  8078. }
  8079. if (tg3_flag(tp, PCI_EXPRESS))
  8080. rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
  8081. if (tg3_asic_rev(tp) == ASIC_REV_57766) {
  8082. tp->dma_limit = 0;
  8083. if (tp->dev->mtu <= ETH_DATA_LEN) {
  8084. rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
  8085. tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
  8086. }
  8087. }
  8088. if (tg3_flag(tp, HW_TSO_1) ||
  8089. tg3_flag(tp, HW_TSO_2) ||
  8090. tg3_flag(tp, HW_TSO_3))
  8091. rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
  8092. if (tg3_flag(tp, 57765_PLUS) ||
  8093. tg3_asic_rev(tp) == ASIC_REV_5785 ||
  8094. tg3_asic_rev(tp) == ASIC_REV_57780)
  8095. rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
  8096. if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
  8097. tg3_asic_rev(tp) == ASIC_REV_5762)
  8098. rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
  8099. if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
  8100. tg3_asic_rev(tp) == ASIC_REV_5784 ||
  8101. tg3_asic_rev(tp) == ASIC_REV_5785 ||
  8102. tg3_asic_rev(tp) == ASIC_REV_57780 ||
  8103. tg3_flag(tp, 57765_PLUS)) {
  8104. u32 tgtreg;
  8105. if (tg3_asic_rev(tp) == ASIC_REV_5762)
  8106. tgtreg = TG3_RDMA_RSRVCTRL_REG2;
  8107. else
  8108. tgtreg = TG3_RDMA_RSRVCTRL_REG;
  8109. val = tr32(tgtreg);
  8110. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
  8111. tg3_asic_rev(tp) == ASIC_REV_5762) {
  8112. val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
  8113. TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
  8114. TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
  8115. val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
  8116. TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
  8117. TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
  8118. }
  8119. tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
  8120. }
  8121. if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
  8122. tg3_asic_rev(tp) == ASIC_REV_5720 ||
  8123. tg3_asic_rev(tp) == ASIC_REV_5762) {
  8124. u32 tgtreg;
  8125. if (tg3_asic_rev(tp) == ASIC_REV_5762)
  8126. tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
  8127. else
  8128. tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
  8129. val = tr32(tgtreg);
  8130. tw32(tgtreg, val |
  8131. TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
  8132. TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
  8133. }
  8134. /* Receive/send statistics. */
  8135. if (tg3_flag(tp, 5750_PLUS)) {
  8136. val = tr32(RCVLPC_STATS_ENABLE);
  8137. val &= ~RCVLPC_STATSENAB_DACK_FIX;
  8138. tw32(RCVLPC_STATS_ENABLE, val);
  8139. } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
  8140. tg3_flag(tp, TSO_CAPABLE)) {
  8141. val = tr32(RCVLPC_STATS_ENABLE);
  8142. val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
  8143. tw32(RCVLPC_STATS_ENABLE, val);
  8144. } else {
  8145. tw32(RCVLPC_STATS_ENABLE, 0xffffff);
  8146. }
  8147. tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
  8148. tw32(SNDDATAI_STATSENAB, 0xffffff);
  8149. tw32(SNDDATAI_STATSCTRL,
  8150. (SNDDATAI_SCTRL_ENABLE |
  8151. SNDDATAI_SCTRL_FASTUPD));
  8152. /* Setup host coalescing engine. */
  8153. tw32(HOSTCC_MODE, 0);
  8154. for (i = 0; i < 2000; i++) {
  8155. if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
  8156. break;
  8157. udelay(10);
  8158. }
  8159. __tg3_set_coalesce(tp, &tp->coal);
  8160. if (!tg3_flag(tp, 5705_PLUS)) {
  8161. /* Status/statistics block address. See tg3_timer,
  8162. * the tg3_periodic_fetch_stats call there, and
  8163. * tg3_get_stats to see how this works for 5705/5750 chips.
  8164. */
  8165. tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
  8166. ((u64) tp->stats_mapping >> 32));
  8167. tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
  8168. ((u64) tp->stats_mapping & 0xffffffff));
  8169. tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
  8170. tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
  8171. /* Clear statistics and status block memory areas */
  8172. for (i = NIC_SRAM_STATS_BLK;
  8173. i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
  8174. i += sizeof(u32)) {
  8175. tg3_write_mem(tp, i, 0);
  8176. udelay(40);
  8177. }
  8178. }
  8179. tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
  8180. tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
  8181. tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
  8182. if (!tg3_flag(tp, 5705_PLUS))
  8183. tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
  8184. if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
  8185. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  8186. /* reset to prevent losing 1st rx packet intermittently */
  8187. tw32_f(MAC_RX_MODE, RX_MODE_RESET);
  8188. udelay(10);
  8189. }
  8190. tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
  8191. MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
  8192. MAC_MODE_FHDE_ENABLE;
  8193. if (tg3_flag(tp, ENABLE_APE))
  8194. tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
  8195. if (!tg3_flag(tp, 5705_PLUS) &&
  8196. !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
  8197. tg3_asic_rev(tp) != ASIC_REV_5700)
  8198. tp->mac_mode |= MAC_MODE_LINK_POLARITY;
  8199. tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
  8200. udelay(40);
  8201. /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
  8202. * If TG3_FLAG_IS_NIC is zero, we should read the
  8203. * register to preserve the GPIO settings for LOMs. The GPIOs,
  8204. * whether used as inputs or outputs, are set by boot code after
  8205. * reset.
  8206. */
  8207. if (!tg3_flag(tp, IS_NIC)) {
  8208. u32 gpio_mask;
  8209. gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
  8210. GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
  8211. GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
  8212. if (tg3_asic_rev(tp) == ASIC_REV_5752)
  8213. gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
  8214. GRC_LCLCTRL_GPIO_OUTPUT3;
  8215. if (tg3_asic_rev(tp) == ASIC_REV_5755)
  8216. gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
  8217. tp->grc_local_ctrl &= ~gpio_mask;
  8218. tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
  8219. /* GPIO1 must be driven high for eeprom write protect */
  8220. if (tg3_flag(tp, EEPROM_WRITE_PROT))
  8221. tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
  8222. GRC_LCLCTRL_GPIO_OUTPUT1);
  8223. }
  8224. tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
  8225. udelay(100);
  8226. if (tg3_flag(tp, USING_MSIX)) {
  8227. val = tr32(MSGINT_MODE);
  8228. val |= MSGINT_MODE_ENABLE;
  8229. if (tp->irq_cnt > 1)
  8230. val |= MSGINT_MODE_MULTIVEC_EN;
  8231. if (!tg3_flag(tp, 1SHOT_MSI))
  8232. val |= MSGINT_MODE_ONE_SHOT_DISABLE;
  8233. tw32(MSGINT_MODE, val);
  8234. }
  8235. if (!tg3_flag(tp, 5705_PLUS)) {
  8236. tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
  8237. udelay(40);
  8238. }
  8239. val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
  8240. WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
  8241. WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
  8242. WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
  8243. WDMAC_MODE_LNGREAD_ENAB);
  8244. if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
  8245. tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
  8246. if (tg3_flag(tp, TSO_CAPABLE) &&
  8247. (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
  8248. tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
  8249. /* nothing */
  8250. } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
  8251. !tg3_flag(tp, IS_5788)) {
  8252. val |= WDMAC_MODE_RX_ACCEL;
  8253. }
  8254. }
  8255. /* Enable host coalescing bug fix */
  8256. if (tg3_flag(tp, 5755_PLUS))
  8257. val |= WDMAC_MODE_STATUS_TAG_FIX;
  8258. if (tg3_asic_rev(tp) == ASIC_REV_5785)
  8259. val |= WDMAC_MODE_BURST_ALL_DATA;
  8260. tw32_f(WDMAC_MODE, val);
  8261. udelay(40);
  8262. if (tg3_flag(tp, PCIX_MODE)) {
  8263. u16 pcix_cmd;
  8264. pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
  8265. &pcix_cmd);
  8266. if (tg3_asic_rev(tp) == ASIC_REV_5703) {
  8267. pcix_cmd &= ~PCI_X_CMD_MAX_READ;
  8268. pcix_cmd |= PCI_X_CMD_READ_2K;
  8269. } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
  8270. pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
  8271. pcix_cmd |= PCI_X_CMD_READ_2K;
  8272. }
  8273. pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
  8274. pcix_cmd);
  8275. }
  8276. tw32_f(RDMAC_MODE, rdmac_mode);
  8277. udelay(40);
  8278. if (tg3_asic_rev(tp) == ASIC_REV_5719) {
  8279. for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
  8280. if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
  8281. break;
  8282. }
  8283. if (i < TG3_NUM_RDMA_CHANNELS) {
  8284. val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
  8285. val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
  8286. tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
  8287. tg3_flag_set(tp, 5719_RDMA_BUG);
  8288. }
  8289. }
  8290. tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
  8291. if (!tg3_flag(tp, 5705_PLUS))
  8292. tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
  8293. if (tg3_asic_rev(tp) == ASIC_REV_5761)
  8294. tw32(SNDDATAC_MODE,
  8295. SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
  8296. else
  8297. tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
  8298. tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
  8299. tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
  8300. val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
  8301. if (tg3_flag(tp, LRG_PROD_RING_CAP))
  8302. val |= RCVDBDI_MODE_LRG_RING_SZ;
  8303. tw32(RCVDBDI_MODE, val);
  8304. tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
  8305. if (tg3_flag(tp, HW_TSO_1) ||
  8306. tg3_flag(tp, HW_TSO_2) ||
  8307. tg3_flag(tp, HW_TSO_3))
  8308. tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
  8309. val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
  8310. if (tg3_flag(tp, ENABLE_TSS))
  8311. val |= SNDBDI_MODE_MULTI_TXQ_EN;
  8312. tw32(SNDBDI_MODE, val);
  8313. tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
  8314. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
  8315. err = tg3_load_5701_a0_firmware_fix(tp);
  8316. if (err)
  8317. return err;
  8318. }
  8319. if (tg3_asic_rev(tp) == ASIC_REV_57766) {
  8320. /* Ignore any errors for the firmware download. If download
8321. * fails, the device will operate with EEE disabled.
  8322. */
  8323. tg3_load_57766_firmware(tp);
  8324. }
  8325. if (tg3_flag(tp, TSO_CAPABLE)) {
  8326. err = tg3_load_tso_firmware(tp);
  8327. if (err)
  8328. return err;
  8329. }
  8330. tp->tx_mode = TX_MODE_ENABLE;
  8331. if (tg3_flag(tp, 5755_PLUS) ||
  8332. tg3_asic_rev(tp) == ASIC_REV_5906)
  8333. tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
  8334. if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
  8335. tg3_asic_rev(tp) == ASIC_REV_5762) {
  8336. val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
  8337. tp->tx_mode &= ~val;
  8338. tp->tx_mode |= tr32(MAC_TX_MODE) & val;
  8339. }
  8340. tw32_f(MAC_TX_MODE, tp->tx_mode);
  8341. udelay(100);
  8342. if (tg3_flag(tp, ENABLE_RSS)) {
  8343. tg3_rss_write_indir_tbl(tp);
  8344. /* Setup the "secret" hash key. */
  8345. tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
  8346. tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
  8347. tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
  8348. tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
  8349. tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
  8350. tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
  8351. tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
  8352. tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
  8353. tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
  8354. tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
  8355. }
  8356. tp->rx_mode = RX_MODE_ENABLE;
  8357. if (tg3_flag(tp, 5755_PLUS))
  8358. tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
  8359. if (tg3_flag(tp, ENABLE_RSS))
  8360. tp->rx_mode |= RX_MODE_RSS_ENABLE |
  8361. RX_MODE_RSS_ITBL_HASH_BITS_7 |
  8362. RX_MODE_RSS_IPV6_HASH_EN |
  8363. RX_MODE_RSS_TCP_IPV6_HASH_EN |
  8364. RX_MODE_RSS_IPV4_HASH_EN |
  8365. RX_MODE_RSS_TCP_IPV4_HASH_EN;
  8366. tw32_f(MAC_RX_MODE, tp->rx_mode);
  8367. udelay(10);
  8368. tw32(MAC_LED_CTRL, tp->led_ctrl);
  8369. tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
  8370. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
  8371. tw32_f(MAC_RX_MODE, RX_MODE_RESET);
  8372. udelay(10);
  8373. }
  8374. tw32_f(MAC_RX_MODE, tp->rx_mode);
  8375. udelay(10);
  8376. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
  8377. if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
  8378. !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
  8379. /* Set drive transmission level to 1.2V */
  8380. /* only if the signal pre-emphasis bit is not set */
  8381. val = tr32(MAC_SERDES_CFG);
  8382. val &= 0xfffff000;
  8383. val |= 0x880;
  8384. tw32(MAC_SERDES_CFG, val);
  8385. }
  8386. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
  8387. tw32(MAC_SERDES_CFG, 0x616000);
  8388. }
  8389. /* Prevent chip from dropping frames when flow control
  8390. * is enabled.
  8391. */
  8392. if (tg3_flag(tp, 57765_CLASS))
  8393. val = 1;
  8394. else
  8395. val = 2;
  8396. tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
  8397. if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
  8398. (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
  8399. /* Use hardware link auto-negotiation */
  8400. tg3_flag_set(tp, HW_AUTONEG);
  8401. }
  8402. if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
  8403. tg3_asic_rev(tp) == ASIC_REV_5714) {
  8404. u32 tmp;
  8405. tmp = tr32(SERDES_RX_CTRL);
  8406. tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
  8407. tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
  8408. tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
  8409. tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
  8410. }
  8411. if (!tg3_flag(tp, USE_PHYLIB)) {
  8412. if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
  8413. tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
  8414. err = tg3_setup_phy(tp, 0);
  8415. if (err)
  8416. return err;
  8417. if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
  8418. !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
  8419. u32 tmp;
  8420. /* Clear CRC stats. */
  8421. if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
  8422. tg3_writephy(tp, MII_TG3_TEST1,
  8423. tmp | MII_TG3_TEST1_CRC_EN);
  8424. tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
  8425. }
  8426. }
  8427. }
  8428. __tg3_set_rx_mode(tp->dev);
  8429. /* Initialize receive rules. */
  8430. tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
  8431. tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
  8432. tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
  8433. tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
  8434. if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
  8435. limit = 8;
  8436. else
  8437. limit = 16;
  8438. if (tg3_flag(tp, ENABLE_ASF))
  8439. limit -= 4;
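/* Note: the switch below relies on intentional fall-through.  Starting
 * at the highest rule this NIC supports, every lower-numbered receive
 * rule/value pair is cleared as well; rules 0 and 1 were programmed
 * above, and rules 2 and 3 are deliberately left untouched.
 */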
  8440. switch (limit) {
  8441. case 16:
  8442. tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
  8443. case 15:
  8444. tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
  8445. case 14:
  8446. tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
  8447. case 13:
  8448. tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
  8449. case 12:
  8450. tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
  8451. case 11:
  8452. tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
  8453. case 10:
  8454. tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
  8455. case 9:
  8456. tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
  8457. case 8:
  8458. tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
  8459. case 7:
  8460. tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
  8461. case 6:
  8462. tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
  8463. case 5:
  8464. tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
  8465. case 4:
  8466. /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
  8467. case 3:
  8468. /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
  8469. case 2:
  8470. case 1:
  8471. default:
  8472. break;
  8473. }
  8474. if (tg3_flag(tp, ENABLE_APE))
  8475. /* Write our heartbeat update interval to APE. */
  8476. tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
  8477. APE_HOST_HEARTBEAT_INT_DISABLE);
  8478. tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
  8479. return 0;
  8480. }
  8481. /* Called at device open time to get the chip ready for
  8482. * packet processing. Invoked with tp->lock held.
  8483. */
  8484. static int tg3_init_hw(struct tg3 *tp, int reset_phy)
  8485. {
  8486. tg3_switch_clocks(tp);
  8487. tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  8488. return tg3_reset_hw(tp, reset_phy);
  8489. }
  8490. static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
  8491. {
  8492. int i;
  8493. for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
  8494. u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
  8495. tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
  8496. off += len;
  8497. if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
  8498. !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
  8499. memset(ocir, 0, TG3_OCIR_LEN);
  8500. }
  8501. }
  8502. /* sysfs attributes for hwmon */
  8503. static ssize_t tg3_show_temp(struct device *dev,
  8504. struct device_attribute *devattr, char *buf)
  8505. {
  8506. struct pci_dev *pdev = to_pci_dev(dev);
  8507. struct net_device *netdev = pci_get_drvdata(pdev);
  8508. struct tg3 *tp = netdev_priv(netdev);
  8509. struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
  8510. u32 temperature;
  8511. spin_lock_bh(&tp->lock);
  8512. tg3_ape_scratchpad_read(tp, &temperature, attr->index,
  8513. sizeof(temperature));
  8514. spin_unlock_bh(&tp->lock);
  8515. return sprintf(buf, "%u\n", temperature);
  8516. }
  8517. static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
  8518. TG3_TEMP_SENSOR_OFFSET);
  8519. static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
  8520. TG3_TEMP_CAUTION_OFFSET);
  8521. static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
  8522. TG3_TEMP_MAX_OFFSET);
  8523. static struct attribute *tg3_attributes[] = {
  8524. &sensor_dev_attr_temp1_input.dev_attr.attr,
  8525. &sensor_dev_attr_temp1_crit.dev_attr.attr,
  8526. &sensor_dev_attr_temp1_max.dev_attr.attr,
  8527. NULL
  8528. };
  8529. static const struct attribute_group tg3_group = {
  8530. .attrs = tg3_attributes,
  8531. };
  8532. static void tg3_hwmon_close(struct tg3 *tp)
  8533. {
  8534. if (tp->hwmon_dev) {
  8535. hwmon_device_unregister(tp->hwmon_dev);
  8536. tp->hwmon_dev = NULL;
  8537. sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
  8538. }
  8539. }
  8540. static void tg3_hwmon_open(struct tg3 *tp)
  8541. {
  8542. int i, err;
  8543. u32 size = 0;
  8544. struct pci_dev *pdev = tp->pdev;
  8545. struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
  8546. tg3_sd_scan_scratchpad(tp, ocirs);
  8547. for (i = 0; i < TG3_SD_NUM_RECS; i++) {
  8548. if (!ocirs[i].src_data_length)
  8549. continue;
  8550. size += ocirs[i].src_hdr_length;
  8551. size += ocirs[i].src_data_length;
  8552. }
  8553. if (!size)
  8554. return;
  8555. /* Register hwmon sysfs hooks */
  8556. err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
  8557. if (err) {
  8558. dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
  8559. return;
  8560. }
  8561. tp->hwmon_dev = hwmon_device_register(&pdev->dev);
  8562. if (IS_ERR(tp->hwmon_dev)) {
  8563. tp->hwmon_dev = NULL;
  8564. dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
  8565. sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
  8566. }
  8567. }
  8568. #define TG3_STAT_ADD32(PSTAT, REG) \
  8569. do { u32 __val = tr32(REG); \
  8570. (PSTAT)->low += __val; \
  8571. if ((PSTAT)->low < __val) \
  8572. (PSTAT)->high += 1; \
  8573. } while (0)
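/* TG3_STAT_ADD32() accumulates a 32-bit hardware counter into a 64-bit
 * software counter: if the new .low value is smaller than the value
 * just added, the 32-bit addition wrapped, so .high is incremented.
 * Example: low = 0xfffffff8, __val = 0x10 -> low becomes 0x8 (< 0x10),
 * which carries into high.
 */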
  8574. static void tg3_periodic_fetch_stats(struct tg3 *tp)
  8575. {
  8576. struct tg3_hw_stats *sp = tp->hw_stats;
  8577. if (!tp->link_up)
  8578. return;
  8579. TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
  8580. TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
  8581. TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
  8582. TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
  8583. TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
  8584. TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
  8585. TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
  8586. TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
  8587. TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
  8588. TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
  8589. TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
  8590. TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
  8591. TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
  8592. if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
  8593. (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
  8594. sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
  8595. u32 val;
  8596. val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
  8597. val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
  8598. tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
  8599. tg3_flag_clear(tp, 5719_RDMA_BUG);
  8600. }
  8601. TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
  8602. TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
  8603. TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
  8604. TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
  8605. TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
  8606. TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
  8607. TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
  8608. TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
  8609. TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
  8610. TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
  8611. TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
  8612. TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
  8613. TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
  8614. TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
  8615. TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
  8616. if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
  8617. tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
  8618. tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
  8619. TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
  8620. } else {
  8621. u32 val = tr32(HOSTCC_FLOW_ATTN);
  8622. val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
  8623. if (val) {
  8624. tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
  8625. sp->rx_discards.low += val;
  8626. if (sp->rx_discards.low < val)
  8627. sp->rx_discards.high += 1;
  8628. }
  8629. sp->mbuf_lwm_thresh_hit = sp->rx_discards;
  8630. }
  8631. TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
  8632. }
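/* Work around occasionally lost MSIs: if a NAPI context reports pending
 * work but its rx/tx consumer indices have not moved since the previous
 * timer tick, assume the interrupt was dropped and call tg3_msi() directly
 * to restart processing.  One grace tick is allowed via chk_msi_cnt before
 * the handler is kicked.
 */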
  8633. static void tg3_chk_missed_msi(struct tg3 *tp)
  8634. {
  8635. u32 i;
  8636. for (i = 0; i < tp->irq_cnt; i++) {
  8637. struct tg3_napi *tnapi = &tp->napi[i];
  8638. if (tg3_has_work(tnapi)) {
  8639. if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
  8640. tnapi->last_tx_cons == tnapi->tx_cons) {
  8641. if (tnapi->chk_msi_cnt < 1) {
  8642. tnapi->chk_msi_cnt++;
  8643. return;
  8644. }
  8645. tg3_msi(0, tnapi);
  8646. }
  8647. }
  8648. tnapi->chk_msi_cnt = 0;
  8649. tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
  8650. tnapi->last_tx_cons = tnapi->tx_cons;
  8651. }
  8652. }
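/* Periodic driver timer.  Depending on chip and configuration it checks for
 * missed MSIs, flushes posted writes, nudges the interrupt mailbox when
 * tagged status is unavailable, fetches MAC statistics and polls the
 * PHY/SerDes roughly once per second, and sends the ASF heartbeat every
 * TG3_FW_UPDATE_FREQ_SEC seconds.  It reschedules itself even when it bails
 * out early because a reset is pending.
 */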
  8653. static void tg3_timer(unsigned long __opaque)
  8654. {
  8655. struct tg3 *tp = (struct tg3 *) __opaque;
  8656. if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
  8657. goto restart_timer;
  8658. spin_lock(&tp->lock);
  8659. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  8660. tg3_flag(tp, 57765_CLASS))
  8661. tg3_chk_missed_msi(tp);
  8662. if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
  8663. /* BCM4785: Flush posted writes from GbE to host memory. */
  8664. tr32(HOSTCC_MODE);
  8665. }
  8666. if (!tg3_flag(tp, TAGGED_STATUS)) {
8667. /* All of this garbage is because, when using non-tagged
8668. * IRQ status, the mailbox/status_block protocol the chip
8669. * uses with the CPU is race prone.
8670. */
  8671. if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
  8672. tw32(GRC_LOCAL_CTRL,
  8673. tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
  8674. } else {
  8675. tw32(HOSTCC_MODE, tp->coalesce_mode |
  8676. HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
  8677. }
  8678. if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
  8679. spin_unlock(&tp->lock);
  8680. tg3_reset_task_schedule(tp);
  8681. goto restart_timer;
  8682. }
  8683. }
  8684. /* This part only runs once per second. */
  8685. if (!--tp->timer_counter) {
  8686. if (tg3_flag(tp, 5705_PLUS))
  8687. tg3_periodic_fetch_stats(tp);
  8688. if (tp->setlpicnt && !--tp->setlpicnt)
  8689. tg3_phy_eee_enable(tp);
  8690. if (tg3_flag(tp, USE_LINKCHG_REG)) {
  8691. u32 mac_stat;
  8692. int phy_event;
  8693. mac_stat = tr32(MAC_STATUS);
  8694. phy_event = 0;
  8695. if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
  8696. if (mac_stat & MAC_STATUS_MI_INTERRUPT)
  8697. phy_event = 1;
  8698. } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
  8699. phy_event = 1;
  8700. if (phy_event)
  8701. tg3_setup_phy(tp, 0);
  8702. } else if (tg3_flag(tp, POLL_SERDES)) {
  8703. u32 mac_stat = tr32(MAC_STATUS);
  8704. int need_setup = 0;
  8705. if (tp->link_up &&
  8706. (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
  8707. need_setup = 1;
  8708. }
  8709. if (!tp->link_up &&
  8710. (mac_stat & (MAC_STATUS_PCS_SYNCED |
  8711. MAC_STATUS_SIGNAL_DET))) {
  8712. need_setup = 1;
  8713. }
  8714. if (need_setup) {
  8715. if (!tp->serdes_counter) {
  8716. tw32_f(MAC_MODE,
  8717. (tp->mac_mode &
  8718. ~MAC_MODE_PORT_MODE_MASK));
  8719. udelay(40);
  8720. tw32_f(MAC_MODE, tp->mac_mode);
  8721. udelay(40);
  8722. }
  8723. tg3_setup_phy(tp, 0);
  8724. }
  8725. } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
  8726. tg3_flag(tp, 5780_CLASS)) {
  8727. tg3_serdes_parallel_detect(tp);
  8728. }
  8729. tp->timer_counter = tp->timer_multiplier;
  8730. }
  8731. /* Heartbeat is only sent once every 2 seconds.
  8732. *
  8733. * The heartbeat is to tell the ASF firmware that the host
  8734. * driver is still alive. In the event that the OS crashes,
  8735. * ASF needs to reset the hardware to free up the FIFO space
  8736. * that may be filled with rx packets destined for the host.
  8737. * If the FIFO is full, ASF will no longer function properly.
  8738. *
8739. * Unintended resets have been reported on real-time kernels
8740. * where the timer doesn't run on time. Netpoll will also have
8741. * the same problem.
  8742. *
  8743. * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
  8744. * to check the ring condition when the heartbeat is expiring
  8745. * before doing the reset. This will prevent most unintended
  8746. * resets.
  8747. */
  8748. if (!--tp->asf_counter) {
  8749. if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
  8750. tg3_wait_for_event_ack(tp);
  8751. tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
  8752. FWCMD_NICDRV_ALIVE3);
  8753. tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
  8754. tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
  8755. TG3_FW_UPDATE_TIMEOUT_SEC);
  8756. tg3_generate_fw_event(tp);
  8757. }
  8758. tp->asf_counter = tp->asf_multiplier;
  8759. }
  8760. spin_unlock(&tp->lock);
  8761. restart_timer:
  8762. tp->timer.expires = jiffies + tp->timer_offset;
  8763. add_timer(&tp->timer);
  8764. }
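/* Choose the timer period: chips with tagged status (other than the 5717
 * and the 57765 class) are serviced once per second, everything else every
 * HZ/10 ticks.  timer_multiplier converts that period back into one-second
 * units for the per-second work in tg3_timer(), and asf_multiplier scales
 * it to the ASF heartbeat interval.
 */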
  8765. static void tg3_timer_init(struct tg3 *tp)
  8766. {
  8767. if (tg3_flag(tp, TAGGED_STATUS) &&
  8768. tg3_asic_rev(tp) != ASIC_REV_5717 &&
  8769. !tg3_flag(tp, 57765_CLASS))
  8770. tp->timer_offset = HZ;
  8771. else
  8772. tp->timer_offset = HZ / 10;
  8773. BUG_ON(tp->timer_offset > HZ);
  8774. tp->timer_multiplier = (HZ / tp->timer_offset);
  8775. tp->asf_multiplier = (HZ / tp->timer_offset) *
  8776. TG3_FW_UPDATE_FREQ_SEC;
  8777. init_timer(&tp->timer);
  8778. tp->timer.data = (unsigned long) tp;
  8779. tp->timer.function = tg3_timer;
  8780. }
  8781. static void tg3_timer_start(struct tg3 *tp)
  8782. {
  8783. tp->asf_counter = tp->asf_multiplier;
  8784. tp->timer_counter = tp->timer_multiplier;
  8785. tp->timer.expires = jiffies + tp->timer_offset;
  8786. add_timer(&tp->timer);
  8787. }
  8788. static void tg3_timer_stop(struct tg3 *tp)
  8789. {
  8790. del_timer_sync(&tp->timer);
  8791. }
  8792. /* Restart hardware after configuration changes, self-test, etc.
  8793. * Invoked with tp->lock held.
  8794. */
  8795. static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
  8796. __releases(tp->lock)
  8797. __acquires(tp->lock)
  8798. {
  8799. int err;
  8800. err = tg3_init_hw(tp, reset_phy);
  8801. if (err) {
  8802. netdev_err(tp->dev,
  8803. "Failed to re-initialize device, aborting\n");
  8804. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  8805. tg3_full_unlock(tp);
  8806. tg3_timer_stop(tp);
  8807. tp->irq_sync = 0;
  8808. tg3_napi_enable(tp);
  8809. dev_close(tp->dev);
  8810. tg3_full_lock(tp, 0);
  8811. }
  8812. return err;
  8813. }
  8814. static void tg3_reset_task(struct work_struct *work)
  8815. {
  8816. struct tg3 *tp = container_of(work, struct tg3, reset_task);
  8817. int err;
  8818. tg3_full_lock(tp, 0);
  8819. if (!netif_running(tp->dev)) {
  8820. tg3_flag_clear(tp, RESET_TASK_PENDING);
  8821. tg3_full_unlock(tp);
  8822. return;
  8823. }
  8824. tg3_full_unlock(tp);
  8825. tg3_phy_stop(tp);
  8826. tg3_netif_stop(tp);
  8827. tg3_full_lock(tp, 1);
  8828. if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
  8829. tp->write32_tx_mbox = tg3_write32_tx_mbox;
  8830. tp->write32_rx_mbox = tg3_write_flush_reg32;
  8831. tg3_flag_set(tp, MBOX_WRITE_REORDER);
  8832. tg3_flag_clear(tp, TX_RECOVERY_PENDING);
  8833. }
  8834. tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
  8835. err = tg3_init_hw(tp, 1);
  8836. if (err)
  8837. goto out;
  8838. tg3_netif_start(tp);
  8839. out:
  8840. tg3_full_unlock(tp);
  8841. if (!err)
  8842. tg3_phy_start(tp);
  8843. tg3_flag_clear(tp, RESET_TASK_PENDING);
  8844. }
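/* Install the interrupt handler for one vector.  MSI/MSI-X vectors get
 * tg3_msi() (or tg3_msi_1shot() when one-shot MSI is in use) and need no
 * IRQF_SHARED; legacy INTx uses tg3_interrupt() or tg3_interrupt_tagged()
 * and must be shareable.  With more than one vector each IRQ is named
 * "<ifname>-<vector index>".
 */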
  8845. static int tg3_request_irq(struct tg3 *tp, int irq_num)
  8846. {
  8847. irq_handler_t fn;
  8848. unsigned long flags;
  8849. char *name;
  8850. struct tg3_napi *tnapi = &tp->napi[irq_num];
  8851. if (tp->irq_cnt == 1)
  8852. name = tp->dev->name;
  8853. else {
  8854. name = &tnapi->irq_lbl[0];
  8855. snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
  8856. name[IFNAMSIZ-1] = 0;
  8857. }
  8858. if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
  8859. fn = tg3_msi;
  8860. if (tg3_flag(tp, 1SHOT_MSI))
  8861. fn = tg3_msi_1shot;
  8862. flags = 0;
  8863. } else {
  8864. fn = tg3_interrupt;
  8865. if (tg3_flag(tp, TAGGED_STATUS))
  8866. fn = tg3_interrupt_tagged;
  8867. flags = IRQF_SHARED;
  8868. }
  8869. return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
  8870. }
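/* Verify that the chip can actually raise an interrupt on this system.
 * The regular handler is temporarily replaced with tg3_test_isr, MSI
 * one-shot mode is disabled so delivery leaves visible state, and a
 * coalescing event is forced.  The loop then polls the interrupt mailbox
 * for up to ~50 ms (5 x 10 ms) for evidence that the interrupt arrived.
 */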
  8871. static int tg3_test_interrupt(struct tg3 *tp)
  8872. {
  8873. struct tg3_napi *tnapi = &tp->napi[0];
  8874. struct net_device *dev = tp->dev;
  8875. int err, i, intr_ok = 0;
  8876. u32 val;
  8877. if (!netif_running(dev))
  8878. return -ENODEV;
  8879. tg3_disable_ints(tp);
  8880. free_irq(tnapi->irq_vec, tnapi);
  8881. /*
8882. * Turn off MSI one-shot mode. Otherwise this test has no way
8883. * to observe whether the interrupt was delivered.
  8884. */
  8885. if (tg3_flag(tp, 57765_PLUS)) {
  8886. val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
  8887. tw32(MSGINT_MODE, val);
  8888. }
  8889. err = request_irq(tnapi->irq_vec, tg3_test_isr,
  8890. IRQF_SHARED, dev->name, tnapi);
  8891. if (err)
  8892. return err;
  8893. tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
  8894. tg3_enable_ints(tp);
  8895. tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
  8896. tnapi->coal_now);
  8897. for (i = 0; i < 5; i++) {
  8898. u32 int_mbox, misc_host_ctrl;
  8899. int_mbox = tr32_mailbox(tnapi->int_mbox);
  8900. misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
  8901. if ((int_mbox != 0) ||
  8902. (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
  8903. intr_ok = 1;
  8904. break;
  8905. }
  8906. if (tg3_flag(tp, 57765_PLUS) &&
  8907. tnapi->hw_status->status_tag != tnapi->last_tag)
  8908. tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
  8909. msleep(10);
  8910. }
  8911. tg3_disable_ints(tp);
  8912. free_irq(tnapi->irq_vec, tnapi);
  8913. err = tg3_request_irq(tp, 0);
  8914. if (err)
  8915. return err;
  8916. if (intr_ok) {
  8917. /* Reenable MSI one shot mode. */
  8918. if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
  8919. val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
  8920. tw32(MSGINT_MODE, val);
  8921. }
  8922. return 0;
  8923. }
  8924. return -EIO;
  8925. }
8926. /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
8927. * INTx mode is successfully restored.
8928. */
  8929. static int tg3_test_msi(struct tg3 *tp)
  8930. {
  8931. int err;
  8932. u16 pci_cmd;
  8933. if (!tg3_flag(tp, USING_MSI))
  8934. return 0;
  8935. /* Turn off SERR reporting in case MSI terminates with Master
  8936. * Abort.
  8937. */
  8938. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  8939. pci_write_config_word(tp->pdev, PCI_COMMAND,
  8940. pci_cmd & ~PCI_COMMAND_SERR);
  8941. err = tg3_test_interrupt(tp);
  8942. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  8943. if (!err)
  8944. return 0;
  8945. /* other failures */
  8946. if (err != -EIO)
  8947. return err;
  8948. /* MSI test failed, go back to INTx mode */
  8949. netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
  8950. "to INTx mode. Please report this failure to the PCI "
  8951. "maintainer and include system chipset information\n");
  8952. free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
  8953. pci_disable_msi(tp->pdev);
  8954. tg3_flag_clear(tp, USING_MSI);
  8955. tp->napi[0].irq_vec = tp->pdev->irq;
  8956. err = tg3_request_irq(tp, 0);
  8957. if (err)
  8958. return err;
  8959. /* Need to reset the chip because the MSI cycle may have terminated
  8960. * with Master Abort.
  8961. */
  8962. tg3_full_lock(tp, 1);
  8963. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  8964. err = tg3_init_hw(tp, 1);
  8965. tg3_full_unlock(tp);
  8966. if (err)
  8967. free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
  8968. return err;
  8969. }
  8970. static int tg3_request_firmware(struct tg3 *tp)
  8971. {
  8972. const struct tg3_firmware_hdr *fw_hdr;
  8973. if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
  8974. netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
  8975. tp->fw_needed);
  8976. return -ENOENT;
  8977. }
  8978. fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
8979. /* The firmware blob starts with version numbers, followed by the
8980. * start address and the _full_ length including BSS sections
8981. * (which must be longer than the actual data, of course).
8982. */
  8983. tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
  8984. if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
  8985. netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
  8986. tp->fw_len, tp->fw_needed);
  8987. release_firmware(tp->fw);
  8988. tp->fw = NULL;
  8989. return -EINVAL;
  8990. }
  8991. /* We no longer need firmware; we have it. */
  8992. tp->fw_needed = NULL;
  8993. return 0;
  8994. }
  8995. static u32 tg3_irq_count(struct tg3 *tp)
  8996. {
  8997. u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
  8998. if (irq_cnt > 1) {
  8999. /* We want as many rx rings enabled as there are cpus.
  9000. * In multiqueue MSI-X mode, the first MSI-X vector
9001. * only deals with link interrupts, etc., so we add
  9002. * one to the number of vectors we are requesting.
  9003. */
  9004. irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
  9005. }
  9006. return irq_cnt;
  9007. }
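/* Size the rx/tx queue counts and try to switch the device to MSI-X.  A
 * positive return from pci_enable_msix() indicates how many vectors the
 * platform could actually provide, so the driver retries with that smaller
 * count before giving up and falling back to MSI/INTx.  RSS (and TSS when
 * several tx queues remain) is enabled only if more than one vector was
 * obtained.
 */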
  9008. static bool tg3_enable_msix(struct tg3 *tp)
  9009. {
  9010. int i, rc;
  9011. struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
  9012. tp->txq_cnt = tp->txq_req;
  9013. tp->rxq_cnt = tp->rxq_req;
  9014. if (!tp->rxq_cnt)
  9015. tp->rxq_cnt = netif_get_num_default_rss_queues();
  9016. if (tp->rxq_cnt > tp->rxq_max)
  9017. tp->rxq_cnt = tp->rxq_max;
  9018. /* Disable multiple TX rings by default. Simple round-robin hardware
  9019. * scheduling of the TX rings can cause starvation of rings with
  9020. * small packets when other rings have TSO or jumbo packets.
  9021. */
  9022. if (!tp->txq_req)
  9023. tp->txq_cnt = 1;
  9024. tp->irq_cnt = tg3_irq_count(tp);
  9025. for (i = 0; i < tp->irq_max; i++) {
  9026. msix_ent[i].entry = i;
  9027. msix_ent[i].vector = 0;
  9028. }
  9029. rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
  9030. if (rc < 0) {
  9031. return false;
  9032. } else if (rc != 0) {
  9033. if (pci_enable_msix(tp->pdev, msix_ent, rc))
  9034. return false;
  9035. netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
  9036. tp->irq_cnt, rc);
  9037. tp->irq_cnt = rc;
  9038. tp->rxq_cnt = max(rc - 1, 1);
  9039. if (tp->txq_cnt)
  9040. tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
  9041. }
  9042. for (i = 0; i < tp->irq_max; i++)
  9043. tp->napi[i].irq_vec = msix_ent[i].vector;
  9044. if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
  9045. pci_disable_msix(tp->pdev);
  9046. return false;
  9047. }
  9048. if (tp->irq_cnt == 1)
  9049. return true;
  9050. tg3_flag_set(tp, ENABLE_RSS);
  9051. if (tp->txq_cnt > 1)
  9052. tg3_flag_set(tp, ENABLE_TSS);
  9053. netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
  9054. return true;
  9055. }
  9056. static void tg3_ints_init(struct tg3 *tp)
  9057. {
  9058. if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
  9059. !tg3_flag(tp, TAGGED_STATUS)) {
  9060. /* All MSI supporting chips should support tagged
  9061. * status. Assert that this is the case.
  9062. */
  9063. netdev_warn(tp->dev,
  9064. "MSI without TAGGED_STATUS? Not using MSI\n");
  9065. goto defcfg;
  9066. }
  9067. if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
  9068. tg3_flag_set(tp, USING_MSIX);
  9069. else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
  9070. tg3_flag_set(tp, USING_MSI);
  9071. if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
  9072. u32 msi_mode = tr32(MSGINT_MODE);
  9073. if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
  9074. msi_mode |= MSGINT_MODE_MULTIVEC_EN;
  9075. if (!tg3_flag(tp, 1SHOT_MSI))
  9076. msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
  9077. tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
  9078. }
  9079. defcfg:
  9080. if (!tg3_flag(tp, USING_MSIX)) {
  9081. tp->irq_cnt = 1;
  9082. tp->napi[0].irq_vec = tp->pdev->irq;
  9083. }
  9084. if (tp->irq_cnt == 1) {
  9085. tp->txq_cnt = 1;
  9086. tp->rxq_cnt = 1;
  9087. netif_set_real_num_tx_queues(tp->dev, 1);
  9088. netif_set_real_num_rx_queues(tp->dev, 1);
  9089. }
  9090. }
  9091. static void tg3_ints_fini(struct tg3 *tp)
  9092. {
  9093. if (tg3_flag(tp, USING_MSIX))
  9094. pci_disable_msix(tp->pdev);
  9095. else if (tg3_flag(tp, USING_MSI))
  9096. pci_disable_msi(tp->pdev);
  9097. tg3_flag_clear(tp, USING_MSI);
  9098. tg3_flag_clear(tp, USING_MSIX);
  9099. tg3_flag_clear(tp, ENABLE_RSS);
  9100. tg3_flag_clear(tp, ENABLE_TSS);
  9101. }
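/* Bring the device up: set up interrupt vectors first (the NAPI and ring
 * allocations depend on the final vector count), allocate the DMA-consistent
 * rings, request the per-vector IRQs, program the hardware, optionally
 * verify MSI delivery, then start hwmon, the PTP clock, the periodic timer
 * and the tx queues.  The error paths unwind in reverse order.
 */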
  9102. static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
  9103. bool init)
  9104. {
  9105. struct net_device *dev = tp->dev;
  9106. int i, err;
  9107. /*
  9108. * Setup interrupts first so we know how
  9109. * many NAPI resources to allocate
  9110. */
  9111. tg3_ints_init(tp);
  9112. tg3_rss_check_indir_tbl(tp);
  9113. /* The placement of this call is tied
  9114. * to the setup and use of Host TX descriptors.
  9115. */
  9116. err = tg3_alloc_consistent(tp);
  9117. if (err)
  9118. goto err_out1;
  9119. tg3_napi_init(tp);
  9120. tg3_napi_enable(tp);
  9121. for (i = 0; i < tp->irq_cnt; i++) {
  9122. struct tg3_napi *tnapi = &tp->napi[i];
  9123. err = tg3_request_irq(tp, i);
  9124. if (err) {
  9125. for (i--; i >= 0; i--) {
  9126. tnapi = &tp->napi[i];
  9127. free_irq(tnapi->irq_vec, tnapi);
  9128. }
  9129. goto err_out2;
  9130. }
  9131. }
  9132. tg3_full_lock(tp, 0);
  9133. err = tg3_init_hw(tp, reset_phy);
  9134. if (err) {
  9135. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  9136. tg3_free_rings(tp);
  9137. }
  9138. tg3_full_unlock(tp);
  9139. if (err)
  9140. goto err_out3;
  9141. if (test_irq && tg3_flag(tp, USING_MSI)) {
  9142. err = tg3_test_msi(tp);
  9143. if (err) {
  9144. tg3_full_lock(tp, 0);
  9145. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  9146. tg3_free_rings(tp);
  9147. tg3_full_unlock(tp);
  9148. goto err_out2;
  9149. }
  9150. if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
  9151. u32 val = tr32(PCIE_TRANSACTION_CFG);
  9152. tw32(PCIE_TRANSACTION_CFG,
  9153. val | PCIE_TRANS_CFG_1SHOT_MSI);
  9154. }
  9155. }
  9156. tg3_phy_start(tp);
  9157. tg3_hwmon_open(tp);
  9158. tg3_full_lock(tp, 0);
  9159. tg3_timer_start(tp);
  9160. tg3_flag_set(tp, INIT_COMPLETE);
  9161. tg3_enable_ints(tp);
  9162. if (init)
  9163. tg3_ptp_init(tp);
  9164. else
  9165. tg3_ptp_resume(tp);
  9166. tg3_full_unlock(tp);
  9167. netif_tx_start_all_queues(dev);
9168. /*
9169. * Reset the loopback feature if it was turned on while the device
9170. * was down; make sure it is reinstated properly now.
9171. */
  9172. if (dev->features & NETIF_F_LOOPBACK)
  9173. tg3_set_loopback(dev, dev->features);
  9174. return 0;
  9175. err_out3:
  9176. for (i = tp->irq_cnt - 1; i >= 0; i--) {
  9177. struct tg3_napi *tnapi = &tp->napi[i];
  9178. free_irq(tnapi->irq_vec, tnapi);
  9179. }
  9180. err_out2:
  9181. tg3_napi_disable(tp);
  9182. tg3_napi_fini(tp);
  9183. tg3_free_consistent(tp);
  9184. err_out1:
  9185. tg3_ints_fini(tp);
  9186. return err;
  9187. }
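/* Tear down everything tg3_start() set up, in roughly the reverse order:
 * cancel any pending reset work, stop NAPI and the timer, unregister hwmon,
 * halt the chip under the full lock, then release the IRQs, interrupt
 * vectors, NAPI contexts and DMA rings.
 */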
  9188. static void tg3_stop(struct tg3 *tp)
  9189. {
  9190. int i;
  9191. tg3_reset_task_cancel(tp);
  9192. tg3_netif_stop(tp);
  9193. tg3_timer_stop(tp);
  9194. tg3_hwmon_close(tp);
  9195. tg3_phy_stop(tp);
  9196. tg3_full_lock(tp, 1);
  9197. tg3_disable_ints(tp);
  9198. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  9199. tg3_free_rings(tp);
  9200. tg3_flag_clear(tp, INIT_COMPLETE);
  9201. tg3_full_unlock(tp);
  9202. for (i = tp->irq_cnt - 1; i >= 0; i--) {
  9203. struct tg3_napi *tnapi = &tp->napi[i];
  9204. free_irq(tnapi->irq_vec, tnapi);
  9205. }
  9206. tg3_ints_fini(tp);
  9207. tg3_napi_fini(tp);
  9208. tg3_free_consistent(tp);
  9209. }
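/* Open entry point.  If the chip needs firmware, fetch it first and degrade
 * gracefully when it is missing: 57766 parts only lose EEE, 5701 A0 cannot
 * run at all without it, and everything else merely loses TSO.  The device
 * is then powered up and started, with a PHY reset unless the link is
 * configured to stay up across power-down.
 */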
  9210. static int tg3_open(struct net_device *dev)
  9211. {
  9212. struct tg3 *tp = netdev_priv(dev);
  9213. int err;
  9214. if (tp->fw_needed) {
  9215. err = tg3_request_firmware(tp);
  9216. if (tg3_asic_rev(tp) == ASIC_REV_57766) {
  9217. if (err) {
  9218. netdev_warn(tp->dev, "EEE capability disabled\n");
  9219. tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
  9220. } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
  9221. netdev_warn(tp->dev, "EEE capability restored\n");
  9222. tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
  9223. }
  9224. } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
  9225. if (err)
  9226. return err;
  9227. } else if (err) {
  9228. netdev_warn(tp->dev, "TSO capability disabled\n");
  9229. tg3_flag_clear(tp, TSO_CAPABLE);
  9230. } else if (!tg3_flag(tp, TSO_CAPABLE)) {
  9231. netdev_notice(tp->dev, "TSO capability restored\n");
  9232. tg3_flag_set(tp, TSO_CAPABLE);
  9233. }
  9234. }
  9235. tg3_carrier_off(tp);
  9236. err = tg3_power_up(tp);
  9237. if (err)
  9238. return err;
  9239. tg3_full_lock(tp, 0);
  9240. tg3_disable_ints(tp);
  9241. tg3_flag_clear(tp, INIT_COMPLETE);
  9242. tg3_full_unlock(tp);
  9243. err = tg3_start(tp,
  9244. !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
  9245. true, true);
  9246. if (err) {
  9247. tg3_frob_aux_power(tp, false);
  9248. pci_set_power_state(tp->pdev, PCI_D3hot);
  9249. }
  9250. if (tg3_flag(tp, PTP_CAPABLE)) {
  9251. tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
  9252. &tp->pdev->dev);
  9253. if (IS_ERR(tp->ptp_clock))
  9254. tp->ptp_clock = NULL;
  9255. }
  9256. return err;
  9257. }
  9258. static int tg3_close(struct net_device *dev)
  9259. {
  9260. struct tg3 *tp = netdev_priv(dev);
  9261. tg3_ptp_fini(tp);
  9262. tg3_stop(tp);
  9263. /* Clear stats across close / open calls */
  9264. memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
  9265. memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
  9266. tg3_power_down(tp);
  9267. tg3_carrier_off(tp);
  9268. return 0;
  9269. }
  9270. static inline u64 get_stat64(tg3_stat64_t *val)
  9271. {
  9272. return ((u64)val->high << 32) | ((u64)val->low);
  9273. }
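/* On 5700 / 5701 copper devices the CRC error count is taken from the PHY:
 * the counter is enabled via MII_TG3_TEST1_CRC_EN and then read from
 * MII_TG3_RXR_COUNTERS, and the += accumulation into phy_crc_errors assumes
 * the counter clears on read.  All other devices report the MAC's
 * rx_fcs_errors statistic directly.
 */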
  9274. static u64 tg3_calc_crc_errors(struct tg3 *tp)
  9275. {
  9276. struct tg3_hw_stats *hw_stats = tp->hw_stats;
  9277. if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
  9278. (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  9279. tg3_asic_rev(tp) == ASIC_REV_5701)) {
  9280. u32 val;
  9281. if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
  9282. tg3_writephy(tp, MII_TG3_TEST1,
  9283. val | MII_TG3_TEST1_CRC_EN);
  9284. tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
  9285. } else
  9286. val = 0;
  9287. tp->phy_crc_errors += val;
  9288. return tp->phy_crc_errors;
  9289. }
  9290. return get_stat64(&hw_stats->rx_fcs_errors);
  9291. }
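/* ESTAT_ADD() reports each ethtool statistic as the snapshot saved in
 * estats_prev plus the live 64-bit hardware counter, so the values keep
 * growing across chip resets.  Both snapshots are zeroed across a
 * close/open cycle (see tg3_close()).
 */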
  9292. #define ESTAT_ADD(member) \
  9293. estats->member = old_estats->member + \
  9294. get_stat64(&hw_stats->member)
  9295. static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
  9296. {
  9297. struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
  9298. struct tg3_hw_stats *hw_stats = tp->hw_stats;
  9299. ESTAT_ADD(rx_octets);
  9300. ESTAT_ADD(rx_fragments);
  9301. ESTAT_ADD(rx_ucast_packets);
  9302. ESTAT_ADD(rx_mcast_packets);
  9303. ESTAT_ADD(rx_bcast_packets);
  9304. ESTAT_ADD(rx_fcs_errors);
  9305. ESTAT_ADD(rx_align_errors);
  9306. ESTAT_ADD(rx_xon_pause_rcvd);
  9307. ESTAT_ADD(rx_xoff_pause_rcvd);
  9308. ESTAT_ADD(rx_mac_ctrl_rcvd);
  9309. ESTAT_ADD(rx_xoff_entered);
  9310. ESTAT_ADD(rx_frame_too_long_errors);
  9311. ESTAT_ADD(rx_jabbers);
  9312. ESTAT_ADD(rx_undersize_packets);
  9313. ESTAT_ADD(rx_in_length_errors);
  9314. ESTAT_ADD(rx_out_length_errors);
  9315. ESTAT_ADD(rx_64_or_less_octet_packets);
  9316. ESTAT_ADD(rx_65_to_127_octet_packets);
  9317. ESTAT_ADD(rx_128_to_255_octet_packets);
  9318. ESTAT_ADD(rx_256_to_511_octet_packets);
  9319. ESTAT_ADD(rx_512_to_1023_octet_packets);
  9320. ESTAT_ADD(rx_1024_to_1522_octet_packets);
  9321. ESTAT_ADD(rx_1523_to_2047_octet_packets);
  9322. ESTAT_ADD(rx_2048_to_4095_octet_packets);
  9323. ESTAT_ADD(rx_4096_to_8191_octet_packets);
  9324. ESTAT_ADD(rx_8192_to_9022_octet_packets);
  9325. ESTAT_ADD(tx_octets);
  9326. ESTAT_ADD(tx_collisions);
  9327. ESTAT_ADD(tx_xon_sent);
  9328. ESTAT_ADD(tx_xoff_sent);
  9329. ESTAT_ADD(tx_flow_control);
  9330. ESTAT_ADD(tx_mac_errors);
  9331. ESTAT_ADD(tx_single_collisions);
  9332. ESTAT_ADD(tx_mult_collisions);
  9333. ESTAT_ADD(tx_deferred);
  9334. ESTAT_ADD(tx_excessive_collisions);
  9335. ESTAT_ADD(tx_late_collisions);
  9336. ESTAT_ADD(tx_collide_2times);
  9337. ESTAT_ADD(tx_collide_3times);
  9338. ESTAT_ADD(tx_collide_4times);
  9339. ESTAT_ADD(tx_collide_5times);
  9340. ESTAT_ADD(tx_collide_6times);
  9341. ESTAT_ADD(tx_collide_7times);
  9342. ESTAT_ADD(tx_collide_8times);
  9343. ESTAT_ADD(tx_collide_9times);
  9344. ESTAT_ADD(tx_collide_10times);
  9345. ESTAT_ADD(tx_collide_11times);
  9346. ESTAT_ADD(tx_collide_12times);
  9347. ESTAT_ADD(tx_collide_13times);
  9348. ESTAT_ADD(tx_collide_14times);
  9349. ESTAT_ADD(tx_collide_15times);
  9350. ESTAT_ADD(tx_ucast_packets);
  9351. ESTAT_ADD(tx_mcast_packets);
  9352. ESTAT_ADD(tx_bcast_packets);
  9353. ESTAT_ADD(tx_carrier_sense_errors);
  9354. ESTAT_ADD(tx_discards);
  9355. ESTAT_ADD(tx_errors);
  9356. ESTAT_ADD(dma_writeq_full);
  9357. ESTAT_ADD(dma_write_prioq_full);
  9358. ESTAT_ADD(rxbds_empty);
  9359. ESTAT_ADD(rx_discards);
  9360. ESTAT_ADD(rx_errors);
  9361. ESTAT_ADD(rx_threshold_hit);
  9362. ESTAT_ADD(dma_readq_full);
  9363. ESTAT_ADD(dma_read_prioq_full);
  9364. ESTAT_ADD(tx_comp_queue_full);
  9365. ESTAT_ADD(ring_set_send_prod_index);
  9366. ESTAT_ADD(ring_status_update);
  9367. ESTAT_ADD(nic_irqs);
  9368. ESTAT_ADD(nic_avoided_irqs);
  9369. ESTAT_ADD(nic_tx_threshold_hit);
  9370. ESTAT_ADD(mbuf_lwm_thresh_hit);
  9371. }
  9372. static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
  9373. {
  9374. struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
  9375. struct tg3_hw_stats *hw_stats = tp->hw_stats;
  9376. stats->rx_packets = old_stats->rx_packets +
  9377. get_stat64(&hw_stats->rx_ucast_packets) +
  9378. get_stat64(&hw_stats->rx_mcast_packets) +
  9379. get_stat64(&hw_stats->rx_bcast_packets);
  9380. stats->tx_packets = old_stats->tx_packets +
  9381. get_stat64(&hw_stats->tx_ucast_packets) +
  9382. get_stat64(&hw_stats->tx_mcast_packets) +
  9383. get_stat64(&hw_stats->tx_bcast_packets);
  9384. stats->rx_bytes = old_stats->rx_bytes +
  9385. get_stat64(&hw_stats->rx_octets);
  9386. stats->tx_bytes = old_stats->tx_bytes +
  9387. get_stat64(&hw_stats->tx_octets);
  9388. stats->rx_errors = old_stats->rx_errors +
  9389. get_stat64(&hw_stats->rx_errors);
  9390. stats->tx_errors = old_stats->tx_errors +
  9391. get_stat64(&hw_stats->tx_errors) +
  9392. get_stat64(&hw_stats->tx_mac_errors) +
  9393. get_stat64(&hw_stats->tx_carrier_sense_errors) +
  9394. get_stat64(&hw_stats->tx_discards);
  9395. stats->multicast = old_stats->multicast +
  9396. get_stat64(&hw_stats->rx_mcast_packets);
  9397. stats->collisions = old_stats->collisions +
  9398. get_stat64(&hw_stats->tx_collisions);
  9399. stats->rx_length_errors = old_stats->rx_length_errors +
  9400. get_stat64(&hw_stats->rx_frame_too_long_errors) +
  9401. get_stat64(&hw_stats->rx_undersize_packets);
  9402. stats->rx_over_errors = old_stats->rx_over_errors +
  9403. get_stat64(&hw_stats->rxbds_empty);
  9404. stats->rx_frame_errors = old_stats->rx_frame_errors +
  9405. get_stat64(&hw_stats->rx_align_errors);
  9406. stats->tx_aborted_errors = old_stats->tx_aborted_errors +
  9407. get_stat64(&hw_stats->tx_discards);
  9408. stats->tx_carrier_errors = old_stats->tx_carrier_errors +
  9409. get_stat64(&hw_stats->tx_carrier_sense_errors);
  9410. stats->rx_crc_errors = old_stats->rx_crc_errors +
  9411. tg3_calc_crc_errors(tp);
  9412. stats->rx_missed_errors = old_stats->rx_missed_errors +
  9413. get_stat64(&hw_stats->rx_discards);
  9414. stats->rx_dropped = tp->rx_dropped;
  9415. stats->tx_dropped = tp->tx_dropped;
  9416. }
  9417. static int tg3_get_regs_len(struct net_device *dev)
  9418. {
  9419. return TG3_REG_BLK_SIZE;
  9420. }
  9421. static void tg3_get_regs(struct net_device *dev,
  9422. struct ethtool_regs *regs, void *_p)
  9423. {
  9424. struct tg3 *tp = netdev_priv(dev);
  9425. regs->version = 0;
  9426. memset(_p, 0, TG3_REG_BLK_SIZE);
  9427. if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
  9428. return;
  9429. tg3_full_lock(tp, 0);
  9430. tg3_dump_legacy_regs(tp, (u32 *)_p);
  9431. tg3_full_unlock(tp);
  9432. }
  9433. static int tg3_get_eeprom_len(struct net_device *dev)
  9434. {
  9435. struct tg3 *tp = netdev_priv(dev);
  9436. return tp->nvram_size;
  9437. }
  9438. static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
  9439. {
  9440. struct tg3 *tp = netdev_priv(dev);
  9441. int ret;
  9442. u8 *pd;
  9443. u32 i, offset, len, b_offset, b_count;
  9444. __be32 val;
  9445. if (tg3_flag(tp, NO_NVRAM))
  9446. return -EINVAL;
  9447. if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
  9448. return -EAGAIN;
  9449. offset = eeprom->offset;
  9450. len = eeprom->len;
  9451. eeprom->len = 0;
  9452. eeprom->magic = TG3_EEPROM_MAGIC;
  9453. if (offset & 3) {
  9454. /* adjustments to start on required 4 byte boundary */
  9455. b_offset = offset & 3;
  9456. b_count = 4 - b_offset;
  9457. if (b_count > len) {
  9458. /* i.e. offset=1 len=2 */
  9459. b_count = len;
  9460. }
  9461. ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
  9462. if (ret)
  9463. return ret;
  9464. memcpy(data, ((char *)&val) + b_offset, b_count);
  9465. len -= b_count;
  9466. offset += b_count;
  9467. eeprom->len += b_count;
  9468. }
  9469. /* read bytes up to the last 4 byte boundary */
  9470. pd = &data[eeprom->len];
  9471. for (i = 0; i < (len - (len & 3)); i += 4) {
  9472. ret = tg3_nvram_read_be32(tp, offset + i, &val);
  9473. if (ret) {
  9474. eeprom->len += i;
  9475. return ret;
  9476. }
  9477. memcpy(pd + i, &val, 4);
  9478. }
  9479. eeprom->len += i;
  9480. if (len & 3) {
  9481. /* read last bytes not ending on 4 byte boundary */
  9482. pd = &data[eeprom->len];
  9483. b_count = len & 3;
  9484. b_offset = offset + len - b_count;
  9485. ret = tg3_nvram_read_be32(tp, b_offset, &val);
  9486. if (ret)
  9487. return ret;
  9488. memcpy(pd, &val, b_count);
  9489. eeprom->len += b_count;
  9490. }
  9491. return 0;
  9492. }
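/* NVRAM writes must cover whole 4-byte words.  When the requested range is
 * unaligned at either end, the neighbouring words are read first and merged
 * into a scratch buffer so the write stays word-aligned.  For example,
 * offset=5/len=6 becomes an 8-byte write at offset 4: byte 4 comes from the
 * pre-read "start" word, bytes 5-10 from the caller, and byte 11 from the
 * pre-read "end" word.
 */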
  9493. static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
  9494. {
  9495. struct tg3 *tp = netdev_priv(dev);
  9496. int ret;
  9497. u32 offset, len, b_offset, odd_len;
  9498. u8 *buf;
  9499. __be32 start, end;
  9500. if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
  9501. return -EAGAIN;
  9502. if (tg3_flag(tp, NO_NVRAM) ||
  9503. eeprom->magic != TG3_EEPROM_MAGIC)
  9504. return -EINVAL;
  9505. offset = eeprom->offset;
  9506. len = eeprom->len;
  9507. if ((b_offset = (offset & 3))) {
  9508. /* adjustments to start on required 4 byte boundary */
  9509. ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
  9510. if (ret)
  9511. return ret;
  9512. len += b_offset;
  9513. offset &= ~3;
  9514. if (len < 4)
  9515. len = 4;
  9516. }
  9517. odd_len = 0;
  9518. if (len & 3) {
  9519. /* adjustments to end on required 4 byte boundary */
  9520. odd_len = 1;
  9521. len = (len + 3) & ~3;
  9522. ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
  9523. if (ret)
  9524. return ret;
  9525. }
  9526. buf = data;
  9527. if (b_offset || odd_len) {
  9528. buf = kmalloc(len, GFP_KERNEL);
  9529. if (!buf)
  9530. return -ENOMEM;
  9531. if (b_offset)
  9532. memcpy(buf, &start, 4);
  9533. if (odd_len)
  9534. memcpy(buf+len-4, &end, 4);
  9535. memcpy(buf + b_offset, data, eeprom->len);
  9536. }
  9537. ret = tg3_nvram_write_block(tp, offset, len, buf);
  9538. if (buf != data)
  9539. kfree(buf);
  9540. return ret;
  9541. }
  9542. static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  9543. {
  9544. struct tg3 *tp = netdev_priv(dev);
  9545. if (tg3_flag(tp, USE_PHYLIB)) {
  9546. struct phy_device *phydev;
  9547. if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
  9548. return -EAGAIN;
  9549. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  9550. return phy_ethtool_gset(phydev, cmd);
  9551. }
  9552. cmd->supported = (SUPPORTED_Autoneg);
  9553. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
  9554. cmd->supported |= (SUPPORTED_1000baseT_Half |
  9555. SUPPORTED_1000baseT_Full);
  9556. if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
  9557. cmd->supported |= (SUPPORTED_100baseT_Half |
  9558. SUPPORTED_100baseT_Full |
  9559. SUPPORTED_10baseT_Half |
  9560. SUPPORTED_10baseT_Full |
  9561. SUPPORTED_TP);
  9562. cmd->port = PORT_TP;
  9563. } else {
  9564. cmd->supported |= SUPPORTED_FIBRE;
  9565. cmd->port = PORT_FIBRE;
  9566. }
  9567. cmd->advertising = tp->link_config.advertising;
  9568. if (tg3_flag(tp, PAUSE_AUTONEG)) {
  9569. if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
  9570. if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
  9571. cmd->advertising |= ADVERTISED_Pause;
  9572. } else {
  9573. cmd->advertising |= ADVERTISED_Pause |
  9574. ADVERTISED_Asym_Pause;
  9575. }
  9576. } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
  9577. cmd->advertising |= ADVERTISED_Asym_Pause;
  9578. }
  9579. }
  9580. if (netif_running(dev) && tp->link_up) {
  9581. ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
  9582. cmd->duplex = tp->link_config.active_duplex;
  9583. cmd->lp_advertising = tp->link_config.rmt_adv;
  9584. if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
  9585. if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
  9586. cmd->eth_tp_mdix = ETH_TP_MDI_X;
  9587. else
  9588. cmd->eth_tp_mdix = ETH_TP_MDI;
  9589. }
  9590. } else {
  9591. ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
  9592. cmd->duplex = DUPLEX_UNKNOWN;
  9593. cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
  9594. }
  9595. cmd->phy_address = tp->phy_addr;
  9596. cmd->transceiver = XCVR_INTERNAL;
  9597. cmd->autoneg = tp->link_config.autoneg;
  9598. cmd->maxtxpkt = 0;
  9599. cmd->maxrxpkt = 0;
  9600. return 0;
  9601. }
  9602. static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  9603. {
  9604. struct tg3 *tp = netdev_priv(dev);
  9605. u32 speed = ethtool_cmd_speed(cmd);
  9606. if (tg3_flag(tp, USE_PHYLIB)) {
  9607. struct phy_device *phydev;
  9608. if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
  9609. return -EAGAIN;
  9610. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  9611. return phy_ethtool_sset(phydev, cmd);
  9612. }
  9613. if (cmd->autoneg != AUTONEG_ENABLE &&
  9614. cmd->autoneg != AUTONEG_DISABLE)
  9615. return -EINVAL;
  9616. if (cmd->autoneg == AUTONEG_DISABLE &&
  9617. cmd->duplex != DUPLEX_FULL &&
  9618. cmd->duplex != DUPLEX_HALF)
  9619. return -EINVAL;
  9620. if (cmd->autoneg == AUTONEG_ENABLE) {
  9621. u32 mask = ADVERTISED_Autoneg |
  9622. ADVERTISED_Pause |
  9623. ADVERTISED_Asym_Pause;
  9624. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
  9625. mask |= ADVERTISED_1000baseT_Half |
  9626. ADVERTISED_1000baseT_Full;
  9627. if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  9628. mask |= ADVERTISED_100baseT_Half |
  9629. ADVERTISED_100baseT_Full |
  9630. ADVERTISED_10baseT_Half |
  9631. ADVERTISED_10baseT_Full |
  9632. ADVERTISED_TP;
  9633. else
  9634. mask |= ADVERTISED_FIBRE;
  9635. if (cmd->advertising & ~mask)
  9636. return -EINVAL;
  9637. mask &= (ADVERTISED_1000baseT_Half |
  9638. ADVERTISED_1000baseT_Full |
  9639. ADVERTISED_100baseT_Half |
  9640. ADVERTISED_100baseT_Full |
  9641. ADVERTISED_10baseT_Half |
  9642. ADVERTISED_10baseT_Full);
  9643. cmd->advertising &= mask;
  9644. } else {
  9645. if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
  9646. if (speed != SPEED_1000)
  9647. return -EINVAL;
  9648. if (cmd->duplex != DUPLEX_FULL)
  9649. return -EINVAL;
  9650. } else {
  9651. if (speed != SPEED_100 &&
  9652. speed != SPEED_10)
  9653. return -EINVAL;
  9654. }
  9655. }
  9656. tg3_full_lock(tp, 0);
  9657. tp->link_config.autoneg = cmd->autoneg;
  9658. if (cmd->autoneg == AUTONEG_ENABLE) {
  9659. tp->link_config.advertising = (cmd->advertising |
  9660. ADVERTISED_Autoneg);
  9661. tp->link_config.speed = SPEED_UNKNOWN;
  9662. tp->link_config.duplex = DUPLEX_UNKNOWN;
  9663. } else {
  9664. tp->link_config.advertising = 0;
  9665. tp->link_config.speed = speed;
  9666. tp->link_config.duplex = cmd->duplex;
  9667. }
  9668. tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
  9669. tg3_warn_mgmt_link_flap(tp);
  9670. if (netif_running(dev))
  9671. tg3_setup_phy(tp, 1);
  9672. tg3_full_unlock(tp);
  9673. return 0;
  9674. }
  9675. static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  9676. {
  9677. struct tg3 *tp = netdev_priv(dev);
  9678. strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
  9679. strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
  9680. strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
  9681. strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
  9682. }
  9683. static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  9684. {
  9685. struct tg3 *tp = netdev_priv(dev);
  9686. if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
  9687. wol->supported = WAKE_MAGIC;
  9688. else
  9689. wol->supported = 0;
  9690. wol->wolopts = 0;
  9691. if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
  9692. wol->wolopts = WAKE_MAGIC;
  9693. memset(&wol->sopass, 0, sizeof(wol->sopass));
  9694. }
  9695. static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  9696. {
  9697. struct tg3 *tp = netdev_priv(dev);
  9698. struct device *dp = &tp->pdev->dev;
  9699. if (wol->wolopts & ~WAKE_MAGIC)
  9700. return -EINVAL;
  9701. if ((wol->wolopts & WAKE_MAGIC) &&
  9702. !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
  9703. return -EINVAL;
  9704. device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
  9705. spin_lock_bh(&tp->lock);
  9706. if (device_may_wakeup(dp))
  9707. tg3_flag_set(tp, WOL_ENABLE);
  9708. else
  9709. tg3_flag_clear(tp, WOL_ENABLE);
  9710. spin_unlock_bh(&tp->lock);
  9711. return 0;
  9712. }
  9713. static u32 tg3_get_msglevel(struct net_device *dev)
  9714. {
  9715. struct tg3 *tp = netdev_priv(dev);
  9716. return tp->msg_enable;
  9717. }
  9718. static void tg3_set_msglevel(struct net_device *dev, u32 value)
  9719. {
  9720. struct tg3 *tp = netdev_priv(dev);
  9721. tp->msg_enable = value;
  9722. }
  9723. static int tg3_nway_reset(struct net_device *dev)
  9724. {
  9725. struct tg3 *tp = netdev_priv(dev);
  9726. int r;
  9727. if (!netif_running(dev))
  9728. return -EAGAIN;
  9729. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
  9730. return -EINVAL;
  9731. tg3_warn_mgmt_link_flap(tp);
  9732. if (tg3_flag(tp, USE_PHYLIB)) {
  9733. if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
  9734. return -EAGAIN;
  9735. r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
  9736. } else {
  9737. u32 bmcr;
  9738. spin_lock_bh(&tp->lock);
  9739. r = -EINVAL;
  9740. tg3_readphy(tp, MII_BMCR, &bmcr);
  9741. if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
  9742. ((bmcr & BMCR_ANENABLE) ||
  9743. (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
  9744. tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
  9745. BMCR_ANENABLE);
  9746. r = 0;
  9747. }
  9748. spin_unlock_bh(&tp->lock);
  9749. }
  9750. return r;
  9751. }
  9752. static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
  9753. {
  9754. struct tg3 *tp = netdev_priv(dev);
  9755. ering->rx_max_pending = tp->rx_std_ring_mask;
  9756. if (tg3_flag(tp, JUMBO_RING_ENABLE))
  9757. ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
  9758. else
  9759. ering->rx_jumbo_max_pending = 0;
  9760. ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
  9761. ering->rx_pending = tp->rx_pending;
  9762. if (tg3_flag(tp, JUMBO_RING_ENABLE))
  9763. ering->rx_jumbo_pending = tp->rx_jumbo_pending;
  9764. else
  9765. ering->rx_jumbo_pending = 0;
  9766. ering->tx_pending = tp->napi[0].tx_pending;
  9767. }
  9768. static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
  9769. {
  9770. struct tg3 *tp = netdev_priv(dev);
  9771. int i, irq_sync = 0, err = 0;
  9772. if ((ering->rx_pending > tp->rx_std_ring_mask) ||
  9773. (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
  9774. (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
  9775. (ering->tx_pending <= MAX_SKB_FRAGS) ||
  9776. (tg3_flag(tp, TSO_BUG) &&
  9777. (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
  9778. return -EINVAL;
  9779. if (netif_running(dev)) {
  9780. tg3_phy_stop(tp);
  9781. tg3_netif_stop(tp);
  9782. irq_sync = 1;
  9783. }
  9784. tg3_full_lock(tp, irq_sync);
  9785. tp->rx_pending = ering->rx_pending;
  9786. if (tg3_flag(tp, MAX_RXPEND_64) &&
  9787. tp->rx_pending > 63)
  9788. tp->rx_pending = 63;
  9789. tp->rx_jumbo_pending = ering->rx_jumbo_pending;
  9790. for (i = 0; i < tp->irq_max; i++)
  9791. tp->napi[i].tx_pending = ering->tx_pending;
  9792. if (netif_running(dev)) {
  9793. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  9794. err = tg3_restart_hw(tp, 0);
  9795. if (!err)
  9796. tg3_netif_start(tp);
  9797. }
  9798. tg3_full_unlock(tp);
  9799. if (irq_sync && !err)
  9800. tg3_phy_start(tp);
  9801. return err;
  9802. }
  9803. static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
  9804. {
  9805. struct tg3 *tp = netdev_priv(dev);
  9806. epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
  9807. if (tp->link_config.flowctrl & FLOW_CTRL_RX)
  9808. epause->rx_pause = 1;
  9809. else
  9810. epause->rx_pause = 0;
  9811. if (tp->link_config.flowctrl & FLOW_CTRL_TX)
  9812. epause->tx_pause = 1;
  9813. else
  9814. epause->tx_pause = 0;
  9815. }
  9816. static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
  9817. {
  9818. struct tg3 *tp = netdev_priv(dev);
  9819. int err = 0;
  9820. if (tp->link_config.autoneg == AUTONEG_ENABLE)
  9821. tg3_warn_mgmt_link_flap(tp);
  9822. if (tg3_flag(tp, USE_PHYLIB)) {
  9823. u32 newadv;
  9824. struct phy_device *phydev;
  9825. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  9826. if (!(phydev->supported & SUPPORTED_Pause) ||
  9827. (!(phydev->supported & SUPPORTED_Asym_Pause) &&
  9828. (epause->rx_pause != epause->tx_pause)))
  9829. return -EINVAL;
  9830. tp->link_config.flowctrl = 0;
  9831. if (epause->rx_pause) {
  9832. tp->link_config.flowctrl |= FLOW_CTRL_RX;
  9833. if (epause->tx_pause) {
  9834. tp->link_config.flowctrl |= FLOW_CTRL_TX;
  9835. newadv = ADVERTISED_Pause;
  9836. } else
  9837. newadv = ADVERTISED_Pause |
  9838. ADVERTISED_Asym_Pause;
  9839. } else if (epause->tx_pause) {
  9840. tp->link_config.flowctrl |= FLOW_CTRL_TX;
  9841. newadv = ADVERTISED_Asym_Pause;
  9842. } else
  9843. newadv = 0;
  9844. if (epause->autoneg)
  9845. tg3_flag_set(tp, PAUSE_AUTONEG);
  9846. else
  9847. tg3_flag_clear(tp, PAUSE_AUTONEG);
  9848. if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
  9849. u32 oldadv = phydev->advertising &
  9850. (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
  9851. if (oldadv != newadv) {
  9852. phydev->advertising &=
  9853. ~(ADVERTISED_Pause |
  9854. ADVERTISED_Asym_Pause);
  9855. phydev->advertising |= newadv;
  9856. if (phydev->autoneg) {
  9857. /*
  9858. * Always renegotiate the link to
  9859. * inform our link partner of our
  9860. * flow control settings, even if the
  9861. * flow control is forced. Let
  9862. * tg3_adjust_link() do the final
  9863. * flow control setup.
  9864. */
  9865. return phy_start_aneg(phydev);
  9866. }
  9867. }
  9868. if (!epause->autoneg)
  9869. tg3_setup_flow_control(tp, 0, 0);
  9870. } else {
  9871. tp->link_config.advertising &=
  9872. ~(ADVERTISED_Pause |
  9873. ADVERTISED_Asym_Pause);
  9874. tp->link_config.advertising |= newadv;
  9875. }
  9876. } else {
  9877. int irq_sync = 0;
  9878. if (netif_running(dev)) {
  9879. tg3_netif_stop(tp);
  9880. irq_sync = 1;
  9881. }
  9882. tg3_full_lock(tp, irq_sync);
  9883. if (epause->autoneg)
  9884. tg3_flag_set(tp, PAUSE_AUTONEG);
  9885. else
  9886. tg3_flag_clear(tp, PAUSE_AUTONEG);
  9887. if (epause->rx_pause)
  9888. tp->link_config.flowctrl |= FLOW_CTRL_RX;
  9889. else
  9890. tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
  9891. if (epause->tx_pause)
  9892. tp->link_config.flowctrl |= FLOW_CTRL_TX;
  9893. else
  9894. tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
  9895. if (netif_running(dev)) {
  9896. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  9897. err = tg3_restart_hw(tp, 0);
  9898. if (!err)
  9899. tg3_netif_start(tp);
  9900. }
  9901. tg3_full_unlock(tp);
  9902. }
  9903. tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
  9904. return err;
  9905. }
  9906. static int tg3_get_sset_count(struct net_device *dev, int sset)
  9907. {
  9908. switch (sset) {
  9909. case ETH_SS_TEST:
  9910. return TG3_NUM_TEST;
  9911. case ETH_SS_STATS:
  9912. return TG3_NUM_STATS;
  9913. default:
  9914. return -EOPNOTSUPP;
  9915. }
  9916. }
  9917. static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
  9918. u32 *rules __always_unused)
  9919. {
  9920. struct tg3 *tp = netdev_priv(dev);
  9921. if (!tg3_flag(tp, SUPPORT_MSIX))
  9922. return -EOPNOTSUPP;
  9923. switch (info->cmd) {
  9924. case ETHTOOL_GRXRINGS:
  9925. if (netif_running(tp->dev))
  9926. info->data = tp->rxq_cnt;
  9927. else {
  9928. info->data = num_online_cpus();
  9929. if (info->data > TG3_RSS_MAX_NUM_QS)
  9930. info->data = TG3_RSS_MAX_NUM_QS;
  9931. }
  9932. /* The first interrupt vector only
  9933. * handles link interrupts.
  9934. */
  9935. info->data -= 1;
  9936. return 0;
  9937. default:
  9938. return -EOPNOTSUPP;
  9939. }
  9940. }
  9941. static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
  9942. {
  9943. u32 size = 0;
  9944. struct tg3 *tp = netdev_priv(dev);
  9945. if (tg3_flag(tp, SUPPORT_MSIX))
  9946. size = TG3_RSS_INDIR_TBL_SIZE;
  9947. return size;
  9948. }
  9949. static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
  9950. {
  9951. struct tg3 *tp = netdev_priv(dev);
  9952. int i;
  9953. for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
  9954. indir[i] = tp->rss_ind_tbl[i];
  9955. return 0;
  9956. }
  9957. static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
  9958. {
  9959. struct tg3 *tp = netdev_priv(dev);
  9960. size_t i;
  9961. for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
  9962. tp->rss_ind_tbl[i] = indir[i];
  9963. if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
  9964. return 0;
  9965. /* It is legal to write the indirection
  9966. * table while the device is running.
  9967. */
  9968. tg3_full_lock(tp, 0);
  9969. tg3_rss_write_indir_tbl(tp);
  9970. tg3_full_unlock(tp);
  9971. return 0;
  9972. }
  9973. static void tg3_get_channels(struct net_device *dev,
  9974. struct ethtool_channels *channel)
  9975. {
  9976. struct tg3 *tp = netdev_priv(dev);
  9977. u32 deflt_qs = netif_get_num_default_rss_queues();
  9978. channel->max_rx = tp->rxq_max;
  9979. channel->max_tx = tp->txq_max;
  9980. if (netif_running(dev)) {
  9981. channel->rx_count = tp->rxq_cnt;
  9982. channel->tx_count = tp->txq_cnt;
  9983. } else {
  9984. if (tp->rxq_req)
  9985. channel->rx_count = tp->rxq_req;
  9986. else
  9987. channel->rx_count = min(deflt_qs, tp->rxq_max);
  9988. if (tp->txq_req)
  9989. channel->tx_count = tp->txq_req;
  9990. else
  9991. channel->tx_count = min(deflt_qs, tp->txq_max);
  9992. }
  9993. }
  9994. static int tg3_set_channels(struct net_device *dev,
  9995. struct ethtool_channels *channel)
  9996. {
  9997. struct tg3 *tp = netdev_priv(dev);
  9998. if (!tg3_flag(tp, SUPPORT_MSIX))
  9999. return -EOPNOTSUPP;
  10000. if (channel->rx_count > tp->rxq_max ||
  10001. channel->tx_count > tp->txq_max)
  10002. return -EINVAL;
  10003. tp->rxq_req = channel->rx_count;
  10004. tp->txq_req = channel->tx_count;
  10005. if (!netif_running(dev))
  10006. return 0;
  10007. tg3_stop(tp);
  10008. tg3_carrier_off(tp);
  10009. tg3_start(tp, true, false, false);
  10010. return 0;
  10011. }
  10012. static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
  10013. {
  10014. switch (stringset) {
  10015. case ETH_SS_STATS:
  10016. memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
  10017. break;
  10018. case ETH_SS_TEST:
  10019. memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
  10020. break;
  10021. default:
  10022. WARN_ON(1); /* we need a WARN() */
  10023. break;
  10024. }
  10025. }
  10026. static int tg3_set_phys_id(struct net_device *dev,
  10027. enum ethtool_phys_id_state state)
  10028. {
  10029. struct tg3 *tp = netdev_priv(dev);
  10030. if (!netif_running(tp->dev))
  10031. return -EAGAIN;
  10032. switch (state) {
  10033. case ETHTOOL_ID_ACTIVE:
  10034. return 1; /* cycle on/off once per second */
  10035. case ETHTOOL_ID_ON:
  10036. tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
  10037. LED_CTRL_1000MBPS_ON |
  10038. LED_CTRL_100MBPS_ON |
  10039. LED_CTRL_10MBPS_ON |
  10040. LED_CTRL_TRAFFIC_OVERRIDE |
  10041. LED_CTRL_TRAFFIC_BLINK |
  10042. LED_CTRL_TRAFFIC_LED);
  10043. break;
  10044. case ETHTOOL_ID_OFF:
  10045. tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
  10046. LED_CTRL_TRAFFIC_OVERRIDE);
  10047. break;
  10048. case ETHTOOL_ID_INACTIVE:
  10049. tw32(MAC_LED_CTRL, tp->led_ctrl);
  10050. break;
  10051. }
  10052. return 0;
  10053. }
  10054. static void tg3_get_ethtool_stats(struct net_device *dev,
  10055. struct ethtool_stats *estats, u64 *tmp_stats)
  10056. {
  10057. struct tg3 *tp = netdev_priv(dev);
  10058. if (tp->hw_stats)
  10059. tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
  10060. else
  10061. memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
  10062. }
  10063. static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
  10064. {
  10065. int i;
  10066. __be32 *buf;
  10067. u32 offset = 0, len = 0;
  10068. u32 magic, val;
  10069. if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
  10070. return NULL;
  10071. if (magic == TG3_EEPROM_MAGIC) {
  10072. for (offset = TG3_NVM_DIR_START;
  10073. offset < TG3_NVM_DIR_END;
  10074. offset += TG3_NVM_DIRENT_SIZE) {
  10075. if (tg3_nvram_read(tp, offset, &val))
  10076. return NULL;
  10077. if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
  10078. TG3_NVM_DIRTYPE_EXTVPD)
  10079. break;
  10080. }
  10081. if (offset != TG3_NVM_DIR_END) {
  10082. len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
  10083. if (tg3_nvram_read(tp, offset + 4, &offset))
  10084. return NULL;
  10085. offset = tg3_nvram_logical_addr(tp, offset);
  10086. }
  10087. }
  10088. if (!offset || !len) {
  10089. offset = TG3_NVM_VPD_OFF;
  10090. len = TG3_NVM_VPD_LEN;
  10091. }
  10092. buf = kmalloc(len, GFP_KERNEL);
  10093. if (buf == NULL)
  10094. return NULL;
  10095. if (magic == TG3_EEPROM_MAGIC) {
  10096. for (i = 0; i < len; i += 4) {
  10097. /* The data is in little-endian format in NVRAM.
  10098. * Use the big-endian read routines to preserve
  10099. * the byte order as it exists in NVRAM.
  10100. */
  10101. if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
  10102. goto error;
  10103. }
  10104. } else {
  10105. u8 *ptr;
  10106. ssize_t cnt;
  10107. unsigned int pos = 0;
  10108. ptr = (u8 *)&buf[0];
  10109. for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
  10110. cnt = pci_read_vpd(tp->pdev, pos,
  10111. len - pos, ptr);
  10112. if (cnt == -ETIMEDOUT || cnt == -EINTR)
  10113. cnt = 0;
  10114. else if (cnt < 0)
  10115. goto error;
  10116. }
  10117. if (pos != len)
  10118. goto error;
  10119. }
  10120. *vpdlen = len;
  10121. return buf;
  10122. error:
  10123. kfree(buf);
  10124. return NULL;
  10125. }
  10126. #define NVRAM_TEST_SIZE 0x100
  10127. #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
  10128. #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
  10129. #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
  10130. #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
  10131. #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
  10132. #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
  10133. #define NVRAM_SELFBOOT_HW_SIZE 0x20
  10134. #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
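/* ethtool NVRAM self-test.  The image header's magic selects how much to
 * read and how to validate it: legacy images get CRC checks over the
 * bootstrap and manufacturing blocks plus the VPD checksum keyword,
 * selfboot firmware images use a simple 8-bit sum (skipping the MBA word on
 * rev 2), and selfboot "HW" images carry parity bits that are separated out
 * and checked against the data bytes.
 */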
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
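/* Each reg_tbl[] entry below gives a read_mask selecting the bits that must
 * read back unchanged and a write_mask selecting the bits that must accept
 * both all-zeros and all-ones writes; the flags restrict entries to the
 * ASIC families (5705/5750/5788) for which the layout applies.
 */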
  10303. /* Only test the commonly used registers */
  10304. static int tg3_test_registers(struct tg3 *tp)
  10305. {
  10306. int i, is_5705, is_5750;
  10307. u32 offset, read_mask, write_mask, val, save_val, read_val;
  10308. static struct {
  10309. u16 offset;
  10310. u16 flags;
  10311. #define TG3_FL_5705 0x1
  10312. #define TG3_FL_NOT_5705 0x2
  10313. #define TG3_FL_NOT_5788 0x4
  10314. #define TG3_FL_NOT_5750 0x8
  10315. u32 read_mask;
  10316. u32 write_mask;
  10317. } reg_tbl[] = {
  10318. /* MAC Control Registers */
  10319. { MAC_MODE, TG3_FL_NOT_5705,
  10320. 0x00000000, 0x00ef6f8c },
  10321. { MAC_MODE, TG3_FL_5705,
  10322. 0x00000000, 0x01ef6b8c },
  10323. { MAC_STATUS, TG3_FL_NOT_5705,
  10324. 0x03800107, 0x00000000 },
  10325. { MAC_STATUS, TG3_FL_5705,
  10326. 0x03800100, 0x00000000 },
  10327. { MAC_ADDR_0_HIGH, 0x0000,
  10328. 0x00000000, 0x0000ffff },
  10329. { MAC_ADDR_0_LOW, 0x0000,
  10330. 0x00000000, 0xffffffff },
  10331. { MAC_RX_MTU_SIZE, 0x0000,
  10332. 0x00000000, 0x0000ffff },
  10333. { MAC_TX_MODE, 0x0000,
  10334. 0x00000000, 0x00000070 },
  10335. { MAC_TX_LENGTHS, 0x0000,
  10336. 0x00000000, 0x00003fff },
  10337. { MAC_RX_MODE, TG3_FL_NOT_5705,
  10338. 0x00000000, 0x000007fc },
  10339. { MAC_RX_MODE, TG3_FL_5705,
  10340. 0x00000000, 0x000007dc },
  10341. { MAC_HASH_REG_0, 0x0000,
  10342. 0x00000000, 0xffffffff },
  10343. { MAC_HASH_REG_1, 0x0000,
  10344. 0x00000000, 0xffffffff },
  10345. { MAC_HASH_REG_2, 0x0000,
  10346. 0x00000000, 0xffffffff },
  10347. { MAC_HASH_REG_3, 0x0000,
  10348. 0x00000000, 0xffffffff },
  10349. /* Receive Data and Receive BD Initiator Control Registers. */
  10350. { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
  10351. 0x00000000, 0xffffffff },
  10352. { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
  10353. 0x00000000, 0xffffffff },
  10354. { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
  10355. 0x00000000, 0x00000003 },
  10356. { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
  10357. 0x00000000, 0xffffffff },
  10358. { RCVDBDI_STD_BD+0, 0x0000,
  10359. 0x00000000, 0xffffffff },
  10360. { RCVDBDI_STD_BD+4, 0x0000,
  10361. 0x00000000, 0xffffffff },
  10362. { RCVDBDI_STD_BD+8, 0x0000,
  10363. 0x00000000, 0xffff0002 },
  10364. { RCVDBDI_STD_BD+0xc, 0x0000,
  10365. 0x00000000, 0xffffffff },
  10366. /* Receive BD Initiator Control Registers. */
  10367. { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
  10368. 0x00000000, 0xffffffff },
  10369. { RCVBDI_STD_THRESH, TG3_FL_5705,
  10370. 0x00000000, 0x000003ff },
  10371. { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
  10372. 0x00000000, 0xffffffff },
  10373. /* Host Coalescing Control Registers. */
  10374. { HOSTCC_MODE, TG3_FL_NOT_5705,
  10375. 0x00000000, 0x00000004 },
  10376. { HOSTCC_MODE, TG3_FL_5705,
  10377. 0x00000000, 0x000000f6 },
  10378. { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
  10379. 0x00000000, 0xffffffff },
  10380. { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
  10381. 0x00000000, 0x000003ff },
  10382. { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
  10383. 0x00000000, 0xffffffff },
  10384. { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
  10385. 0x00000000, 0x000003ff },
  10386. { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
  10387. 0x00000000, 0xffffffff },
  10388. { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
  10389. 0x00000000, 0x000000ff },
  10390. { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
  10391. 0x00000000, 0xffffffff },
  10392. { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
  10393. 0x00000000, 0x000000ff },
  10394. { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
  10395. 0x00000000, 0xffffffff },
  10396. { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
  10397. 0x00000000, 0xffffffff },
  10398. { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
  10399. 0x00000000, 0xffffffff },
  10400. { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
  10401. 0x00000000, 0x000000ff },
  10402. { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
  10403. 0x00000000, 0xffffffff },
  10404. { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
  10405. 0x00000000, 0x000000ff },
  10406. { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
  10407. 0x00000000, 0xffffffff },
  10408. { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
  10409. 0x00000000, 0xffffffff },
  10410. { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
  10411. 0x00000000, 0xffffffff },
  10412. { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
  10413. 0x00000000, 0xffffffff },
  10414. { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
  10415. 0x00000000, 0xffffffff },
  10416. { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
  10417. 0xffffffff, 0x00000000 },
  10418. { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
  10419. 0xffffffff, 0x00000000 },
  10420. /* Buffer Manager Control Registers. */
  10421. { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
  10422. 0x00000000, 0x007fff80 },
  10423. { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
  10424. 0x00000000, 0x007fffff },
  10425. { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
  10426. 0x00000000, 0x0000003f },
  10427. { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
  10428. 0x00000000, 0x000001ff },
  10429. { BUFMGR_MB_HIGH_WATER, 0x0000,
  10430. 0x00000000, 0x000001ff },
  10431. { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
  10432. 0xffffffff, 0x00000000 },
  10433. { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
  10434. 0xffffffff, 0x00000000 },
  10435. /* Mailbox Registers */
  10436. { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
  10437. 0x00000000, 0x000001ff },
  10438. { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
  10439. 0x00000000, 0x000001ff },
  10440. { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
  10441. 0x00000000, 0x000007ff },
  10442. { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
  10443. 0x00000000, 0x000001ff },
  10444. { 0xffff, 0x0000, 0x00000000, 0x00000000 },
  10445. };
  10446. is_5705 = is_5750 = 0;
  10447. if (tg3_flag(tp, 5705_PLUS)) {
  10448. is_5705 = 1;
  10449. if (tg3_flag(tp, 5750_PLUS))
  10450. is_5750 = 1;
  10451. }
  10452. for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
  10453. if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
  10454. continue;
  10455. if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
  10456. continue;
  10457. if (tg3_flag(tp, IS_5788) &&
  10458. (reg_tbl[i].flags & TG3_FL_NOT_5788))
  10459. continue;
  10460. if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
  10461. continue;
  10462. offset = (u32) reg_tbl[i].offset;
  10463. read_mask = reg_tbl[i].read_mask;
  10464. write_mask = reg_tbl[i].write_mask;
  10465. /* Save the original register content */
  10466. save_val = tr32(offset);
  10467. /* Determine the read-only value. */
  10468. read_val = save_val & read_mask;
  10469. /* Write zero to the register, then make sure the read-only bits
  10470. * are not changed and the read/write bits are all zeros.
  10471. */
  10472. tw32(offset, 0);
  10473. val = tr32(offset);
  10474. /* Test the read-only and read/write bits. */
  10475. if (((val & read_mask) != read_val) || (val & write_mask))
  10476. goto out;
  10477. /* Write ones to all the bits defined by RdMask and WrMask, then
  10478. * make sure the read-only bits are not changed and the
  10479. * read/write bits are all ones.
  10480. */
  10481. tw32(offset, read_mask | write_mask);
  10482. val = tr32(offset);
  10483. /* Test the read-only bits. */
  10484. if ((val & read_mask) != read_val)
  10485. goto out;
  10486. /* Test the read/write bits. */
  10487. if ((val & write_mask) != write_mask)
  10488. goto out;
  10489. tw32(offset, save_val);
  10490. }
  10491. return 0;
  10492. out:
  10493. if (netif_msg_hw(tp))
  10494. netdev_err(tp->dev,
  10495. "Register test failed at offset %x\n", offset);
  10496. tw32(offset, save_val);
  10497. return -EIO;
  10498. }
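/* Walk a window of internal NIC memory, writing each test pattern with
 * tg3_write_mem() and reading it back with tg3_read_mem(); any mismatch
 * fails the memory test.
 */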
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
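/* The offset/length tables below describe the internal SRAM regions
 * exercised by the memory test for each ASIC generation; a sentinel offset
 * of 0xffffffff terminates each table.
 */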
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
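/* Canned template frame for the TSO loopback test: an Ethernet type of IPv4
 * followed by a 20-byte IP header and a 32-byte TCP header (20 bytes plus a
 * 12-byte timestamp option), with placeholder addresses and checksums that
 * are filled in or ignored during the test.
 */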
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
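/* Send one frame (optionally a TSO burst) to ourselves and verify it comes
 * back: build the packet, post it on the TX ring, poll the hardware status
 * block until the TX consumer and RX producer advance, then check that the
 * returned descriptor landed on the expected ring and that the payload
 * bytes match what was sent.
 */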
  10601. static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
  10602. {
  10603. u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
  10604. u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
  10605. u32 budget;
  10606. struct sk_buff *skb;
  10607. u8 *tx_data, *rx_data;
  10608. dma_addr_t map;
  10609. int num_pkts, tx_len, rx_len, i, err;
  10610. struct tg3_rx_buffer_desc *desc;
  10611. struct tg3_napi *tnapi, *rnapi;
  10612. struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
  10613. tnapi = &tp->napi[0];
  10614. rnapi = &tp->napi[0];
  10615. if (tp->irq_cnt > 1) {
  10616. if (tg3_flag(tp, ENABLE_RSS))
  10617. rnapi = &tp->napi[1];
  10618. if (tg3_flag(tp, ENABLE_TSS))
  10619. tnapi = &tp->napi[1];
  10620. }
  10621. coal_now = tnapi->coal_now | rnapi->coal_now;
  10622. err = -EIO;
  10623. tx_len = pktsz;
  10624. skb = netdev_alloc_skb(tp->dev, tx_len);
  10625. if (!skb)
  10626. return -ENOMEM;
  10627. tx_data = skb_put(skb, tx_len);
  10628. memcpy(tx_data, tp->dev->dev_addr, 6);
  10629. memset(tx_data + 6, 0x0, 8);
  10630. tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
  10631. if (tso_loopback) {
  10632. struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
  10633. u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
  10634. TG3_TSO_TCP_OPT_LEN;
  10635. memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
  10636. sizeof(tg3_tso_header));
  10637. mss = TG3_TSO_MSS;
  10638. val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
  10639. num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
  10640. /* Set the total length field in the IP header */
  10641. iph->tot_len = htons((u16)(mss + hdr_len));
  10642. base_flags = (TXD_FLAG_CPU_PRE_DMA |
  10643. TXD_FLAG_CPU_POST_DMA);
  10644. if (tg3_flag(tp, HW_TSO_1) ||
  10645. tg3_flag(tp, HW_TSO_2) ||
  10646. tg3_flag(tp, HW_TSO_3)) {
  10647. struct tcphdr *th;
  10648. val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
  10649. th = (struct tcphdr *)&tx_data[val];
  10650. th->check = 0;
  10651. } else
  10652. base_flags |= TXD_FLAG_TCPUDP_CSUM;
  10653. if (tg3_flag(tp, HW_TSO_3)) {
  10654. mss |= (hdr_len & 0xc) << 12;
  10655. if (hdr_len & 0x10)
  10656. base_flags |= 0x00000010;
  10657. base_flags |= (hdr_len & 0x3e0) << 5;
  10658. } else if (tg3_flag(tp, HW_TSO_2))
  10659. mss |= hdr_len << 9;
  10660. else if (tg3_flag(tp, HW_TSO_1) ||
  10661. tg3_asic_rev(tp) == ASIC_REV_5705) {
  10662. mss |= (TG3_TSO_TCP_OPT_LEN << 9);
  10663. } else {
  10664. base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
  10665. }
  10666. data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
  10667. } else {
  10668. num_pkts = 1;
  10669. data_off = ETH_HLEN;
  10670. if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
  10671. tx_len > VLAN_ETH_FRAME_LEN)
  10672. base_flags |= TXD_FLAG_JMB_PKT;
  10673. }
  10674. for (i = data_off; i < tx_len; i++)
  10675. tx_data[i] = (u8) (i & 0xff);
  10676. map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
  10677. if (pci_dma_mapping_error(tp->pdev, map)) {
  10678. dev_kfree_skb(skb);
  10679. return -EIO;
  10680. }
  10681. val = tnapi->tx_prod;
  10682. tnapi->tx_buffers[val].skb = skb;
  10683. dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
  10684. tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
  10685. rnapi->coal_now);
  10686. udelay(10);
  10687. rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
  10688. budget = tg3_tx_avail(tnapi);
  10689. if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
  10690. base_flags | TXD_FLAG_END, mss, 0)) {
  10691. tnapi->tx_buffers[val].skb = NULL;
  10692. dev_kfree_skb(skb);
  10693. return -EIO;
  10694. }
  10695. tnapi->tx_prod++;
  10696. /* Sync BD data before updating mailbox */
  10697. wmb();
  10698. tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
  10699. tr32_mailbox(tnapi->prodmbox);
  10700. udelay(10);
  10701. /* 350 usec to allow enough time on some 10/100 Mbps devices. */
  10702. for (i = 0; i < 35; i++) {
  10703. tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
  10704. coal_now);
  10705. udelay(10);
  10706. tx_idx = tnapi->hw_status->idx[0].tx_consumer;
  10707. rx_idx = rnapi->hw_status->idx[0].rx_producer;
  10708. if ((tx_idx == tnapi->tx_prod) &&
  10709. (rx_idx == (rx_start_idx + num_pkts)))
  10710. break;
  10711. }
  10712. tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
  10713. dev_kfree_skb(skb);
  10714. if (tx_idx != tnapi->tx_prod)
  10715. goto out;
  10716. if (rx_idx != rx_start_idx + num_pkts)
  10717. goto out;
  10718. val = data_off;
  10719. while (rx_idx != rx_start_idx) {
  10720. desc = &rnapi->rx_rcb[rx_start_idx++];
  10721. desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
  10722. opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
  10723. if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
  10724. (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
  10725. goto out;
  10726. rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
  10727. - ETH_FCS_LEN;
  10728. if (!tso_loopback) {
  10729. if (rx_len != tx_len)
  10730. goto out;
  10731. if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
  10732. if (opaque_key != RXD_OPAQUE_RING_STD)
  10733. goto out;
  10734. } else {
  10735. if (opaque_key != RXD_OPAQUE_RING_JUMBO)
  10736. goto out;
  10737. }
  10738. } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
  10739. (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
  10740. >> RXD_TCPCSUM_SHIFT != 0xffff) {
  10741. goto out;
  10742. }
  10743. if (opaque_key == RXD_OPAQUE_RING_STD) {
  10744. rx_data = tpr->rx_std_buffers[desc_idx].data;
  10745. map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
  10746. mapping);
  10747. } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
  10748. rx_data = tpr->rx_jmb_buffers[desc_idx].data;
  10749. map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
  10750. mapping);
  10751. } else
  10752. goto out;
  10753. pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
  10754. PCI_DMA_FROMDEVICE);
  10755. rx_data += TG3_RX_OFFSET(tp);
  10756. for (i = data_off; i < rx_len; i++, val++) {
  10757. if (*(rx_data + i) != (u8) (val & 0xff))
  10758. goto out;
  10759. }
  10760. }
  10761. err = 0;
  10762. /* tg3_free_rings will unmap and free the rx_data */
  10763. out:
  10764. return err;
  10765. }
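/* Run the loopback matrix: internal MAC loopback (where not affected by
 * errata), internal PHY loopback, and optionally external loopback, each
 * with standard, TSO and jumbo frame sizes as supported.  Per-mode failure
 * bits are accumulated in data[].
 */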
  10766. #define TG3_STD_LOOPBACK_FAILED 1
  10767. #define TG3_JMB_LOOPBACK_FAILED 2
  10768. #define TG3_TSO_LOOPBACK_FAILED 4
  10769. #define TG3_LOOPBACK_FAILED \
  10770. (TG3_STD_LOOPBACK_FAILED | \
  10771. TG3_JMB_LOOPBACK_FAILED | \
  10772. TG3_TSO_LOOPBACK_FAILED)
  10773. static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
  10774. {
  10775. int err = -EIO;
  10776. u32 eee_cap;
  10777. u32 jmb_pkt_sz = 9000;
  10778. if (tp->dma_limit)
  10779. jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
  10780. eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
  10781. tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
  10782. if (!netif_running(tp->dev)) {
  10783. data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
  10784. data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
  10785. if (do_extlpbk)
  10786. data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
  10787. goto done;
  10788. }
  10789. err = tg3_reset_hw(tp, 1);
  10790. if (err) {
  10791. data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
  10792. data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
  10793. if (do_extlpbk)
  10794. data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
  10795. goto done;
  10796. }
  10797. if (tg3_flag(tp, ENABLE_RSS)) {
  10798. int i;
  10799. /* Reroute all rx packets to the 1st queue */
  10800. for (i = MAC_RSS_INDIR_TBL_0;
  10801. i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
  10802. tw32(i, 0x0);
  10803. }
  10804. /* HW errata - mac loopback fails in some cases on 5780.
  10805. * Normal traffic and PHY loopback are not affected by
  10806. * errata. Also, the MAC loopback test is deprecated for
  10807. * all newer ASIC revisions.
  10808. */
  10809. if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
  10810. !tg3_flag(tp, CPMU_PRESENT)) {
  10811. tg3_mac_loopback(tp, true);
  10812. if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
  10813. data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
  10814. if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
  10815. tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
  10816. data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
  10817. tg3_mac_loopback(tp, false);
  10818. }
  10819. if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
  10820. !tg3_flag(tp, USE_PHYLIB)) {
  10821. int i;
  10822. tg3_phy_lpbk_set(tp, 0, false);
  10823. /* Wait for link */
  10824. for (i = 0; i < 100; i++) {
  10825. if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
  10826. break;
  10827. mdelay(1);
  10828. }
  10829. if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
  10830. data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
  10831. if (tg3_flag(tp, TSO_CAPABLE) &&
  10832. tg3_run_loopback(tp, ETH_FRAME_LEN, true))
  10833. data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
  10834. if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
  10835. tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
  10836. data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
  10837. if (do_extlpbk) {
  10838. tg3_phy_lpbk_set(tp, 0, true);
  10839. /* All link indications report up, but the hardware
  10840. * isn't really ready for about 20 msec. Double it
  10841. * to be sure.
  10842. */
  10843. mdelay(40);
  10844. if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
  10845. data[TG3_EXT_LOOPB_TEST] |=
  10846. TG3_STD_LOOPBACK_FAILED;
  10847. if (tg3_flag(tp, TSO_CAPABLE) &&
  10848. tg3_run_loopback(tp, ETH_FRAME_LEN, true))
  10849. data[TG3_EXT_LOOPB_TEST] |=
  10850. TG3_TSO_LOOPBACK_FAILED;
  10851. if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
  10852. tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
  10853. data[TG3_EXT_LOOPB_TEST] |=
  10854. TG3_JMB_LOOPBACK_FAILED;
  10855. }
  10856. /* Re-enable gphy autopowerdown. */
  10857. if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
  10858. tg3_phy_toggle_apd(tp, true);
  10859. }
  10860. err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
  10861. data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
  10862. done:
  10863. tp->phy_flags |= eee_cap;
  10864. return err;
  10865. }
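/* ethtool self-test entry point (invoked via "ethtool -t ethX").  The quick
 * NVRAM and link tests always run; the destructive register, memory,
 * loopback and interrupt tests only run when an offline test is requested,
 * in which case the chip is halted and restarted around them.
 */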
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
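/* SIOCSHWTSTAMP handler: validate the requested hardware timestamping
 * configuration, latch the TX timestamp enable flag, and translate the
 * requested RX filter into the TG3_RX_PTP_CTL register bits.
 */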
  10935. static int tg3_hwtstamp_ioctl(struct net_device *dev,
  10936. struct ifreq *ifr, int cmd)
  10937. {
  10938. struct tg3 *tp = netdev_priv(dev);
  10939. struct hwtstamp_config stmpconf;
  10940. if (!tg3_flag(tp, PTP_CAPABLE))
  10941. return -EINVAL;
  10942. if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
  10943. return -EFAULT;
  10944. if (stmpconf.flags)
  10945. return -EINVAL;
  10946. switch (stmpconf.tx_type) {
  10947. case HWTSTAMP_TX_ON:
  10948. tg3_flag_set(tp, TX_TSTAMP_EN);
  10949. break;
  10950. case HWTSTAMP_TX_OFF:
  10951. tg3_flag_clear(tp, TX_TSTAMP_EN);
  10952. break;
  10953. default:
  10954. return -ERANGE;
  10955. }
  10956. switch (stmpconf.rx_filter) {
  10957. case HWTSTAMP_FILTER_NONE:
  10958. tp->rxptpctl = 0;
  10959. break;
  10960. case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
  10961. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
  10962. TG3_RX_PTP_CTL_ALL_V1_EVENTS;
  10963. break;
  10964. case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
  10965. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
  10966. TG3_RX_PTP_CTL_SYNC_EVNT;
  10967. break;
  10968. case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
  10969. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
  10970. TG3_RX_PTP_CTL_DELAY_REQ;
  10971. break;
  10972. case HWTSTAMP_FILTER_PTP_V2_EVENT:
  10973. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
  10974. TG3_RX_PTP_CTL_ALL_V2_EVENTS;
  10975. break;
  10976. case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
  10977. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
  10978. TG3_RX_PTP_CTL_ALL_V2_EVENTS;
  10979. break;
  10980. case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
  10981. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
  10982. TG3_RX_PTP_CTL_ALL_V2_EVENTS;
  10983. break;
  10984. case HWTSTAMP_FILTER_PTP_V2_SYNC:
  10985. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
  10986. TG3_RX_PTP_CTL_SYNC_EVNT;
  10987. break;
  10988. case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
  10989. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
  10990. TG3_RX_PTP_CTL_SYNC_EVNT;
  10991. break;
  10992. case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
  10993. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
  10994. TG3_RX_PTP_CTL_SYNC_EVNT;
  10995. break;
  10996. case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
  10997. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
  10998. TG3_RX_PTP_CTL_DELAY_REQ;
  10999. break;
  11000. case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
  11001. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
  11002. TG3_RX_PTP_CTL_DELAY_REQ;
  11003. break;
  11004. case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
  11005. tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
  11006. TG3_RX_PTP_CTL_DELAY_REQ;
  11007. break;
  11008. default:
  11009. return -ERANGE;
  11010. }
  11011. if (netif_running(dev) && tp->rxptpctl)
  11012. tw32(TG3_RX_PTP_CTL,
  11013. tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
  11014. return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
  11015. -EFAULT : 0;
  11016. }
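/* Generic ioctl handler.  When the PHY is managed by phylib the request is
 * forwarded to phy_mii_ioctl(); otherwise the MII read/write ioctls are
 * serviced directly, and SIOCSHWTSTAMP is routed to tg3_hwtstamp_ioctl().
 */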
  11017. static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  11018. {
  11019. struct mii_ioctl_data *data = if_mii(ifr);
  11020. struct tg3 *tp = netdev_priv(dev);
  11021. int err;
  11022. if (tg3_flag(tp, USE_PHYLIB)) {
  11023. struct phy_device *phydev;
  11024. if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
  11025. return -EAGAIN;
  11026. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  11027. return phy_mii_ioctl(phydev, ifr, cmd);
  11028. }
  11029. switch (cmd) {
  11030. case SIOCGMIIPHY:
  11031. data->phy_id = tp->phy_addr;
  11032. /* fallthru */
  11033. case SIOCGMIIREG: {
  11034. u32 mii_regval;
  11035. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
  11036. break; /* We have no PHY */
  11037. if (!netif_running(dev))
  11038. return -EAGAIN;
  11039. spin_lock_bh(&tp->lock);
  11040. err = __tg3_readphy(tp, data->phy_id & 0x1f,
  11041. data->reg_num & 0x1f, &mii_regval);
  11042. spin_unlock_bh(&tp->lock);
  11043. data->val_out = mii_regval;
  11044. return err;
  11045. }
  11046. case SIOCSMIIREG:
  11047. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
  11048. break; /* We have no PHY */
  11049. if (!netif_running(dev))
  11050. return -EAGAIN;
  11051. spin_lock_bh(&tp->lock);
  11052. err = __tg3_writephy(tp, data->phy_id & 0x1f,
  11053. data->reg_num & 0x1f, data->val_in);
  11054. spin_unlock_bh(&tp->lock);
  11055. return err;
  11056. case SIOCSHWTSTAMP:
  11057. return tg3_hwtstamp_ioctl(dev, ifr, cmd);
  11058. default:
  11059. /* do nothing */
  11060. break;
  11061. }
  11062. return -EOPNOTSUPP;
  11063. }
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
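/* Validate the requested ethtool coalescing parameters against the hardware
 * limits (the IRQ-context tick and statistics-block intervals are only
 * adjustable on pre-5705 chips), copy the supported fields into tp->coal,
 * and push them to the hardware if the device is running.
 */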
  11070. static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
  11071. {
  11072. struct tg3 *tp = netdev_priv(dev);
  11073. u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
  11074. u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
  11075. if (!tg3_flag(tp, 5705_PLUS)) {
  11076. max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
  11077. max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
  11078. max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
  11079. min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
  11080. }
  11081. if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
  11082. (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
  11083. (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
  11084. (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
  11085. (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
  11086. (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
  11087. (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
  11088. (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
  11089. (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
  11090. (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
  11091. return -EINVAL;
  11092. /* No rx interrupts will be generated if both are zero */
  11093. if ((ec->rx_coalesce_usecs == 0) &&
  11094. (ec->rx_max_coalesced_frames == 0))
  11095. return -EINVAL;
  11096. /* No tx interrupts will be generated if both are zero */
  11097. if ((ec->tx_coalesce_usecs == 0) &&
  11098. (ec->tx_max_coalesced_frames == 0))
  11099. return -EINVAL;
  11100. /* Only copy relevant parameters, ignore all others. */
  11101. tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
  11102. tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
  11103. tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
  11104. tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
  11105. tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
  11106. tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
  11107. tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
  11108. tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
  11109. tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
  11110. if (netif_running(dev)) {
  11111. tg3_full_lock(tp, 0);
  11112. __tg3_set_coalesce(tp, &tp->coal);
  11113. tg3_full_unlock(tp);
  11114. }
  11115. return 0;
  11116. }
  11117. static const struct ethtool_ops tg3_ethtool_ops = {
  11118. .get_settings = tg3_get_settings,
  11119. .set_settings = tg3_set_settings,
  11120. .get_drvinfo = tg3_get_drvinfo,
  11121. .get_regs_len = tg3_get_regs_len,
  11122. .get_regs = tg3_get_regs,
  11123. .get_wol = tg3_get_wol,
  11124. .set_wol = tg3_set_wol,
  11125. .get_msglevel = tg3_get_msglevel,
  11126. .set_msglevel = tg3_set_msglevel,
  11127. .nway_reset = tg3_nway_reset,
  11128. .get_link = ethtool_op_get_link,
  11129. .get_eeprom_len = tg3_get_eeprom_len,
  11130. .get_eeprom = tg3_get_eeprom,
  11131. .set_eeprom = tg3_set_eeprom,
  11132. .get_ringparam = tg3_get_ringparam,
  11133. .set_ringparam = tg3_set_ringparam,
  11134. .get_pauseparam = tg3_get_pauseparam,
  11135. .set_pauseparam = tg3_set_pauseparam,
  11136. .self_test = tg3_self_test,
  11137. .get_strings = tg3_get_strings,
  11138. .set_phys_id = tg3_set_phys_id,
  11139. .get_ethtool_stats = tg3_get_ethtool_stats,
  11140. .get_coalesce = tg3_get_coalesce,
  11141. .set_coalesce = tg3_set_coalesce,
  11142. .get_sset_count = tg3_get_sset_count,
  11143. .get_rxnfc = tg3_get_rxnfc,
  11144. .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
  11145. .get_rxfh_indir = tg3_get_rxfh_indir,
  11146. .set_rxfh_indir = tg3_set_rxfh_indir,
  11147. .get_channels = tg3_get_channels,
  11148. .set_channels = tg3_set_channels,
  11149. .get_ts_info = tg3_get_ts_info,
  11150. };
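/* The ops table above is attached to the net_device during probe
 * (dev->ethtool_ops = &tg3_ethtool_ops), so the ethtool core dispatches
 * directly into these handlers.
 */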
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
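/* MTU changes: tg3_set_mtu() records the new MTU and flips the jumbo-ring
 * and TSO capability flags; tg3_change_mtu() performs the full halt,
 * reconfigure and restart needed for the change to take effect on a
 * running interface.
 */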
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the device is brought up. */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
  11223. static const struct net_device_ops tg3_netdev_ops = {
  11224. .ndo_open = tg3_open,
  11225. .ndo_stop = tg3_close,
  11226. .ndo_start_xmit = tg3_start_xmit,
  11227. .ndo_get_stats64 = tg3_get_stats64,
  11228. .ndo_validate_addr = eth_validate_addr,
  11229. .ndo_set_rx_mode = tg3_set_rx_mode,
  11230. .ndo_set_mac_address = tg3_set_mac_addr,
  11231. .ndo_do_ioctl = tg3_ioctl,
  11232. .ndo_tx_timeout = tg3_tx_timeout,
  11233. .ndo_change_mtu = tg3_change_mtu,
  11234. .ndo_fix_features = tg3_fix_features,
  11235. .ndo_set_features = tg3_set_features,
  11236. #ifdef CONFIG_NET_POLL_CONTROLLER
  11237. .ndo_poll_controller = tg3_poll_controller,
  11238. #endif
  11239. };
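/* NVRAM sizing: tg3_get_nvram_size() reads the size word at offset 0xf0 for
 * standard images, and otherwise falls back to tg3_get_eeprom_size(), which
 * probes increasing power-of-two offsets until the magic signature wraps
 * around.
 */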
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
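/* The tg3_get_*_nvram_info() helpers below decode the NVRAM_CFG1 strapping
 * for each ASIC generation to determine the attached flash/EEPROM vendor,
 * page size, buffering and, where encoded, the total NVRAM size.
 */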
  11294. static void tg3_get_nvram_info(struct tg3 *tp)
  11295. {
  11296. u32 nvcfg1;
  11297. nvcfg1 = tr32(NVRAM_CFG1);
  11298. if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
  11299. tg3_flag_set(tp, FLASH);
  11300. } else {
  11301. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  11302. tw32(NVRAM_CFG1, nvcfg1);
  11303. }
  11304. if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
  11305. tg3_flag(tp, 5780_CLASS)) {
  11306. switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
  11307. case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
  11308. tp->nvram_jedecnum = JEDEC_ATMEL;
  11309. tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
  11310. tg3_flag_set(tp, NVRAM_BUFFERED);
  11311. break;
  11312. case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
  11313. tp->nvram_jedecnum = JEDEC_ATMEL;
  11314. tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
  11315. break;
  11316. case FLASH_VENDOR_ATMEL_EEPROM:
  11317. tp->nvram_jedecnum = JEDEC_ATMEL;
  11318. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  11319. tg3_flag_set(tp, NVRAM_BUFFERED);
  11320. break;
  11321. case FLASH_VENDOR_ST:
  11322. tp->nvram_jedecnum = JEDEC_ST;
  11323. tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
  11324. tg3_flag_set(tp, NVRAM_BUFFERED);
  11325. break;
  11326. case FLASH_VENDOR_SAIFUN:
  11327. tp->nvram_jedecnum = JEDEC_SAIFUN;
  11328. tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
  11329. break;
  11330. case FLASH_VENDOR_SST_SMALL:
  11331. case FLASH_VENDOR_SST_LARGE:
  11332. tp->nvram_jedecnum = JEDEC_SST;
  11333. tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
  11334. break;
  11335. }
  11336. } else {
  11337. tp->nvram_jedecnum = JEDEC_ATMEL;
  11338. tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
  11339. tg3_flag_set(tp, NVRAM_BUFFERED);
  11340. }
  11341. }
  11342. static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
  11343. {
  11344. switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
  11345. case FLASH_5752PAGE_SIZE_256:
  11346. tp->nvram_pagesize = 256;
  11347. break;
  11348. case FLASH_5752PAGE_SIZE_512:
  11349. tp->nvram_pagesize = 512;
  11350. break;
  11351. case FLASH_5752PAGE_SIZE_1K:
  11352. tp->nvram_pagesize = 1024;
  11353. break;
  11354. case FLASH_5752PAGE_SIZE_2K:
  11355. tp->nvram_pagesize = 2048;
  11356. break;
  11357. case FLASH_5752PAGE_SIZE_4K:
  11358. tp->nvram_pagesize = 4096;
  11359. break;
  11360. case FLASH_5752PAGE_SIZE_264:
  11361. tp->nvram_pagesize = 264;
  11362. break;
  11363. case FLASH_5752PAGE_SIZE_528:
  11364. tp->nvram_pagesize = 528;
  11365. break;
  11366. }
  11367. }
  11368. static void tg3_get_5752_nvram_info(struct tg3 *tp)
  11369. {
  11370. u32 nvcfg1;
  11371. nvcfg1 = tr32(NVRAM_CFG1);
  11372. /* NVRAM protection for TPM */
  11373. if (nvcfg1 & (1 << 27))
  11374. tg3_flag_set(tp, PROTECTED_NVRAM);
  11375. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  11376. case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
  11377. case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
  11378. tp->nvram_jedecnum = JEDEC_ATMEL;
  11379. tg3_flag_set(tp, NVRAM_BUFFERED);
  11380. break;
  11381. case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
  11382. tp->nvram_jedecnum = JEDEC_ATMEL;
  11383. tg3_flag_set(tp, NVRAM_BUFFERED);
  11384. tg3_flag_set(tp, FLASH);
  11385. break;
  11386. case FLASH_5752VENDOR_ST_M45PE10:
  11387. case FLASH_5752VENDOR_ST_M45PE20:
  11388. case FLASH_5752VENDOR_ST_M45PE40:
  11389. tp->nvram_jedecnum = JEDEC_ST;
  11390. tg3_flag_set(tp, NVRAM_BUFFERED);
  11391. tg3_flag_set(tp, FLASH);
  11392. break;
  11393. }
  11394. if (tg3_flag(tp, FLASH)) {
  11395. tg3_nvram_get_pagesize(tp, nvcfg1);
  11396. } else {
  11397. /* For eeprom, set pagesize to maximum eeprom size */
  11398. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  11399. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  11400. tw32(NVRAM_CFG1, nvcfg1);
  11401. }
  11402. }
  11403. static void tg3_get_5755_nvram_info(struct tg3 *tp)
  11404. {
  11405. u32 nvcfg1, protect = 0;
  11406. nvcfg1 = tr32(NVRAM_CFG1);
  11407. /* NVRAM protection for TPM */
  11408. if (nvcfg1 & (1 << 27)) {
  11409. tg3_flag_set(tp, PROTECTED_NVRAM);
  11410. protect = 1;
  11411. }
  11412. nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
  11413. switch (nvcfg1) {
  11414. case FLASH_5755VENDOR_ATMEL_FLASH_1:
  11415. case FLASH_5755VENDOR_ATMEL_FLASH_2:
  11416. case FLASH_5755VENDOR_ATMEL_FLASH_3:
  11417. case FLASH_5755VENDOR_ATMEL_FLASH_5:
  11418. tp->nvram_jedecnum = JEDEC_ATMEL;
  11419. tg3_flag_set(tp, NVRAM_BUFFERED);
  11420. tg3_flag_set(tp, FLASH);
  11421. tp->nvram_pagesize = 264;
  11422. if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
  11423. nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
  11424. tp->nvram_size = (protect ? 0x3e200 :
  11425. TG3_NVRAM_SIZE_512KB);
  11426. else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
  11427. tp->nvram_size = (protect ? 0x1f200 :
  11428. TG3_NVRAM_SIZE_256KB);
  11429. else
  11430. tp->nvram_size = (protect ? 0x1f200 :
  11431. TG3_NVRAM_SIZE_128KB);
  11432. break;
  11433. case FLASH_5752VENDOR_ST_M45PE10:
  11434. case FLASH_5752VENDOR_ST_M45PE20:
  11435. case FLASH_5752VENDOR_ST_M45PE40:
  11436. tp->nvram_jedecnum = JEDEC_ST;
  11437. tg3_flag_set(tp, NVRAM_BUFFERED);
  11438. tg3_flag_set(tp, FLASH);
  11439. tp->nvram_pagesize = 256;
  11440. if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
  11441. tp->nvram_size = (protect ?
  11442. TG3_NVRAM_SIZE_64KB :
  11443. TG3_NVRAM_SIZE_128KB);
  11444. else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
  11445. tp->nvram_size = (protect ?
  11446. TG3_NVRAM_SIZE_64KB :
  11447. TG3_NVRAM_SIZE_256KB);
  11448. else
  11449. tp->nvram_size = (protect ?
  11450. TG3_NVRAM_SIZE_128KB :
  11451. TG3_NVRAM_SIZE_512KB);
  11452. break;
  11453. }
  11454. }
  11455. static void tg3_get_5787_nvram_info(struct tg3 *tp)
  11456. {
  11457. u32 nvcfg1;
  11458. nvcfg1 = tr32(NVRAM_CFG1);
  11459. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  11460. case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
  11461. case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
  11462. case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
  11463. case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
  11464. tp->nvram_jedecnum = JEDEC_ATMEL;
  11465. tg3_flag_set(tp, NVRAM_BUFFERED);
  11466. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  11467. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  11468. tw32(NVRAM_CFG1, nvcfg1);
  11469. break;
  11470. case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
  11471. case FLASH_5755VENDOR_ATMEL_FLASH_1:
  11472. case FLASH_5755VENDOR_ATMEL_FLASH_2:
  11473. case FLASH_5755VENDOR_ATMEL_FLASH_3:
  11474. tp->nvram_jedecnum = JEDEC_ATMEL;
  11475. tg3_flag_set(tp, NVRAM_BUFFERED);
  11476. tg3_flag_set(tp, FLASH);
  11477. tp->nvram_pagesize = 264;
  11478. break;
  11479. case FLASH_5752VENDOR_ST_M45PE10:
  11480. case FLASH_5752VENDOR_ST_M45PE20:
  11481. case FLASH_5752VENDOR_ST_M45PE40:
  11482. tp->nvram_jedecnum = JEDEC_ST;
  11483. tg3_flag_set(tp, NVRAM_BUFFERED);
  11484. tg3_flag_set(tp, FLASH);
  11485. tp->nvram_pagesize = 256;
  11486. break;
  11487. }
  11488. }
  11489. static void tg3_get_5761_nvram_info(struct tg3 *tp)
  11490. {
  11491. u32 nvcfg1, protect = 0;
  11492. nvcfg1 = tr32(NVRAM_CFG1);
  11493. /* NVRAM protection for TPM */
  11494. if (nvcfg1 & (1 << 27)) {
  11495. tg3_flag_set(tp, PROTECTED_NVRAM);
  11496. protect = 1;
  11497. }
  11498. nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
  11499. switch (nvcfg1) {
  11500. case FLASH_5761VENDOR_ATMEL_ADB021D:
  11501. case FLASH_5761VENDOR_ATMEL_ADB041D:
  11502. case FLASH_5761VENDOR_ATMEL_ADB081D:
  11503. case FLASH_5761VENDOR_ATMEL_ADB161D:
  11504. case FLASH_5761VENDOR_ATMEL_MDB021D:
  11505. case FLASH_5761VENDOR_ATMEL_MDB041D:
  11506. case FLASH_5761VENDOR_ATMEL_MDB081D:
  11507. case FLASH_5761VENDOR_ATMEL_MDB161D:
  11508. tp->nvram_jedecnum = JEDEC_ATMEL;
  11509. tg3_flag_set(tp, NVRAM_BUFFERED);
  11510. tg3_flag_set(tp, FLASH);
  11511. tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
  11512. tp->nvram_pagesize = 256;
  11513. break;
  11514. case FLASH_5761VENDOR_ST_A_M45PE20:
  11515. case FLASH_5761VENDOR_ST_A_M45PE40:
  11516. case FLASH_5761VENDOR_ST_A_M45PE80:
  11517. case FLASH_5761VENDOR_ST_A_M45PE16:
  11518. case FLASH_5761VENDOR_ST_M_M45PE20:
  11519. case FLASH_5761VENDOR_ST_M_M45PE40:
  11520. case FLASH_5761VENDOR_ST_M_M45PE80:
  11521. case FLASH_5761VENDOR_ST_M_M45PE16:
  11522. tp->nvram_jedecnum = JEDEC_ST;
  11523. tg3_flag_set(tp, NVRAM_BUFFERED);
  11524. tg3_flag_set(tp, FLASH);
  11525. tp->nvram_pagesize = 256;
  11526. break;
  11527. }
  11528. if (protect) {
  11529. tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
  11530. } else {
  11531. switch (nvcfg1) {
  11532. case FLASH_5761VENDOR_ATMEL_ADB161D:
  11533. case FLASH_5761VENDOR_ATMEL_MDB161D:
  11534. case FLASH_5761VENDOR_ST_A_M45PE16:
  11535. case FLASH_5761VENDOR_ST_M_M45PE16:
  11536. tp->nvram_size = TG3_NVRAM_SIZE_2MB;
  11537. break;
  11538. case FLASH_5761VENDOR_ATMEL_ADB081D:
  11539. case FLASH_5761VENDOR_ATMEL_MDB081D:
  11540. case FLASH_5761VENDOR_ST_A_M45PE80:
  11541. case FLASH_5761VENDOR_ST_M_M45PE80:
  11542. tp->nvram_size = TG3_NVRAM_SIZE_1MB;
  11543. break;
  11544. case FLASH_5761VENDOR_ATMEL_ADB041D:
  11545. case FLASH_5761VENDOR_ATMEL_MDB041D:
  11546. case FLASH_5761VENDOR_ST_A_M45PE40:
  11547. case FLASH_5761VENDOR_ST_M_M45PE40:
  11548. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  11549. break;
  11550. case FLASH_5761VENDOR_ATMEL_ADB021D:
  11551. case FLASH_5761VENDOR_ATMEL_MDB021D:
  11552. case FLASH_5761VENDOR_ST_A_M45PE20:
  11553. case FLASH_5761VENDOR_ST_M_M45PE20:
  11554. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  11555. break;
  11556. }
  11557. }
  11558. }
  11559. static void tg3_get_5906_nvram_info(struct tg3 *tp)
  11560. {
  11561. tp->nvram_jedecnum = JEDEC_ATMEL;
  11562. tg3_flag_set(tp, NVRAM_BUFFERED);
  11563. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  11564. }
  11565. static void tg3_get_57780_nvram_info(struct tg3 *tp)
  11566. {
  11567. u32 nvcfg1;
  11568. nvcfg1 = tr32(NVRAM_CFG1);
  11569. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  11570. case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
  11571. case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
  11572. tp->nvram_jedecnum = JEDEC_ATMEL;
  11573. tg3_flag_set(tp, NVRAM_BUFFERED);
  11574. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  11575. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  11576. tw32(NVRAM_CFG1, nvcfg1);
  11577. return;
  11578. case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
  11579. case FLASH_57780VENDOR_ATMEL_AT45DB011D:
  11580. case FLASH_57780VENDOR_ATMEL_AT45DB011B:
  11581. case FLASH_57780VENDOR_ATMEL_AT45DB021D:
  11582. case FLASH_57780VENDOR_ATMEL_AT45DB021B:
  11583. case FLASH_57780VENDOR_ATMEL_AT45DB041D:
  11584. case FLASH_57780VENDOR_ATMEL_AT45DB041B:
  11585. tp->nvram_jedecnum = JEDEC_ATMEL;
  11586. tg3_flag_set(tp, NVRAM_BUFFERED);
  11587. tg3_flag_set(tp, FLASH);
  11588. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  11589. case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
  11590. case FLASH_57780VENDOR_ATMEL_AT45DB011D:
  11591. case FLASH_57780VENDOR_ATMEL_AT45DB011B:
  11592. tp->nvram_size = TG3_NVRAM_SIZE_128KB;
  11593. break;
  11594. case FLASH_57780VENDOR_ATMEL_AT45DB021D:
  11595. case FLASH_57780VENDOR_ATMEL_AT45DB021B:
  11596. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  11597. break;
  11598. case FLASH_57780VENDOR_ATMEL_AT45DB041D:
  11599. case FLASH_57780VENDOR_ATMEL_AT45DB041B:
  11600. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  11601. break;
  11602. }
  11603. break;
  11604. case FLASH_5752VENDOR_ST_M45PE10:
  11605. case FLASH_5752VENDOR_ST_M45PE20:
  11606. case FLASH_5752VENDOR_ST_M45PE40:
  11607. tp->nvram_jedecnum = JEDEC_ST;
  11608. tg3_flag_set(tp, NVRAM_BUFFERED);
  11609. tg3_flag_set(tp, FLASH);
  11610. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  11611. case FLASH_5752VENDOR_ST_M45PE10:
  11612. tp->nvram_size = TG3_NVRAM_SIZE_128KB;
  11613. break;
  11614. case FLASH_5752VENDOR_ST_M45PE20:
  11615. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  11616. break;
  11617. case FLASH_5752VENDOR_ST_M45PE40:
  11618. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  11619. break;
  11620. }
  11621. break;
  11622. default:
  11623. tg3_flag_set(tp, NO_NVRAM);
  11624. return;
  11625. }
  11626. tg3_nvram_get_pagesize(tp, nvcfg1);
  11627. if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
  11628. tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
  11629. }
  11630. static void tg3_get_5717_nvram_info(struct tg3 *tp)
  11631. {
  11632. u32 nvcfg1;
  11633. nvcfg1 = tr32(NVRAM_CFG1);
  11634. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  11635. case FLASH_5717VENDOR_ATMEL_EEPROM:
  11636. case FLASH_5717VENDOR_MICRO_EEPROM:
  11637. tp->nvram_jedecnum = JEDEC_ATMEL;
  11638. tg3_flag_set(tp, NVRAM_BUFFERED);
  11639. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  11640. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  11641. tw32(NVRAM_CFG1, nvcfg1);
  11642. return;
  11643. case FLASH_5717VENDOR_ATMEL_MDB011D:
  11644. case FLASH_5717VENDOR_ATMEL_ADB011B:
  11645. case FLASH_5717VENDOR_ATMEL_ADB011D:
  11646. case FLASH_5717VENDOR_ATMEL_MDB021D:
  11647. case FLASH_5717VENDOR_ATMEL_ADB021B:
  11648. case FLASH_5717VENDOR_ATMEL_ADB021D:
  11649. case FLASH_5717VENDOR_ATMEL_45USPT:
  11650. tp->nvram_jedecnum = JEDEC_ATMEL;
  11651. tg3_flag_set(tp, NVRAM_BUFFERED);
  11652. tg3_flag_set(tp, FLASH);
  11653. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  11654. case FLASH_5717VENDOR_ATMEL_MDB021D:
  11655. /* Detect size with tg3_nvram_get_size() */
  11656. break;
  11657. case FLASH_5717VENDOR_ATMEL_ADB021B:
  11658. case FLASH_5717VENDOR_ATMEL_ADB021D:
  11659. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  11660. break;
  11661. default:
  11662. tp->nvram_size = TG3_NVRAM_SIZE_128KB;
  11663. break;
  11664. }
  11665. break;
  11666. case FLASH_5717VENDOR_ST_M_M25PE10:
  11667. case FLASH_5717VENDOR_ST_A_M25PE10:
  11668. case FLASH_5717VENDOR_ST_M_M45PE10:
  11669. case FLASH_5717VENDOR_ST_A_M45PE10:
  11670. case FLASH_5717VENDOR_ST_M_M25PE20:
  11671. case FLASH_5717VENDOR_ST_A_M25PE20:
  11672. case FLASH_5717VENDOR_ST_M_M45PE20:
  11673. case FLASH_5717VENDOR_ST_A_M45PE20:
  11674. case FLASH_5717VENDOR_ST_25USPT:
  11675. case FLASH_5717VENDOR_ST_45USPT:
  11676. tp->nvram_jedecnum = JEDEC_ST;
  11677. tg3_flag_set(tp, NVRAM_BUFFERED);
  11678. tg3_flag_set(tp, FLASH);
  11679. switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
  11680. case FLASH_5717VENDOR_ST_M_M25PE20:
  11681. case FLASH_5717VENDOR_ST_M_M45PE20:
  11682. /* Detect size with tg3_nvram_get_size() */
  11683. break;
  11684. case FLASH_5717VENDOR_ST_A_M25PE20:
  11685. case FLASH_5717VENDOR_ST_A_M45PE20:
  11686. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  11687. break;
  11688. default:
  11689. tp->nvram_size = TG3_NVRAM_SIZE_128KB;
  11690. break;
  11691. }
  11692. break;
  11693. default:
  11694. tg3_flag_set(tp, NO_NVRAM);
  11695. return;
  11696. }
  11697. tg3_nvram_get_pagesize(tp, nvcfg1);
  11698. if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
  11699. tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
  11700. }
  11701. static void tg3_get_5720_nvram_info(struct tg3 *tp)
  11702. {
  11703. u32 nvcfg1, nvmpinstrp;
  11704. nvcfg1 = tr32(NVRAM_CFG1);
  11705. nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
  11706. if (tg3_asic_rev(tp) == ASIC_REV_5762) {
  11707. if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
  11708. tg3_flag_set(tp, NO_NVRAM);
  11709. return;
  11710. }
  11711. switch (nvmpinstrp) {
  11712. case FLASH_5762_EEPROM_HD:
  11713. nvmpinstrp = FLASH_5720_EEPROM_HD;
  11714. break;
  11715. case FLASH_5762_EEPROM_LD:
  11716. nvmpinstrp = FLASH_5720_EEPROM_LD;
  11717. break;
  11718. case FLASH_5720VENDOR_M_ST_M45PE20:
  11719. /* This pinstrap supports multiple sizes, so force it
  11720. * to read the actual size from location 0xf0.
  11721. */
  11722. nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
  11723. break;
  11724. }
  11725. }
  11726. switch (nvmpinstrp) {
  11727. case FLASH_5720_EEPROM_HD:
  11728. case FLASH_5720_EEPROM_LD:
  11729. tp->nvram_jedecnum = JEDEC_ATMEL;
  11730. tg3_flag_set(tp, NVRAM_BUFFERED);
  11731. nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
  11732. tw32(NVRAM_CFG1, nvcfg1);
  11733. if (nvmpinstrp == FLASH_5720_EEPROM_HD)
  11734. tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  11735. else
  11736. tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
  11737. return;
  11738. case FLASH_5720VENDOR_M_ATMEL_DB011D:
  11739. case FLASH_5720VENDOR_A_ATMEL_DB011B:
  11740. case FLASH_5720VENDOR_A_ATMEL_DB011D:
  11741. case FLASH_5720VENDOR_M_ATMEL_DB021D:
  11742. case FLASH_5720VENDOR_A_ATMEL_DB021B:
  11743. case FLASH_5720VENDOR_A_ATMEL_DB021D:
  11744. case FLASH_5720VENDOR_M_ATMEL_DB041D:
  11745. case FLASH_5720VENDOR_A_ATMEL_DB041B:
  11746. case FLASH_5720VENDOR_A_ATMEL_DB041D:
  11747. case FLASH_5720VENDOR_M_ATMEL_DB081D:
  11748. case FLASH_5720VENDOR_A_ATMEL_DB081D:
  11749. case FLASH_5720VENDOR_ATMEL_45USPT:
  11750. tp->nvram_jedecnum = JEDEC_ATMEL;
  11751. tg3_flag_set(tp, NVRAM_BUFFERED);
  11752. tg3_flag_set(tp, FLASH);
  11753. switch (nvmpinstrp) {
  11754. case FLASH_5720VENDOR_M_ATMEL_DB021D:
  11755. case FLASH_5720VENDOR_A_ATMEL_DB021B:
  11756. case FLASH_5720VENDOR_A_ATMEL_DB021D:
  11757. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  11758. break;
  11759. case FLASH_5720VENDOR_M_ATMEL_DB041D:
  11760. case FLASH_5720VENDOR_A_ATMEL_DB041B:
  11761. case FLASH_5720VENDOR_A_ATMEL_DB041D:
  11762. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  11763. break;
  11764. case FLASH_5720VENDOR_M_ATMEL_DB081D:
  11765. case FLASH_5720VENDOR_A_ATMEL_DB081D:
  11766. tp->nvram_size = TG3_NVRAM_SIZE_1MB;
  11767. break;
  11768. default:
  11769. if (tg3_asic_rev(tp) != ASIC_REV_5762)
  11770. tp->nvram_size = TG3_NVRAM_SIZE_128KB;
  11771. break;
  11772. }
  11773. break;
  11774. case FLASH_5720VENDOR_M_ST_M25PE10:
  11775. case FLASH_5720VENDOR_M_ST_M45PE10:
  11776. case FLASH_5720VENDOR_A_ST_M25PE10:
  11777. case FLASH_5720VENDOR_A_ST_M45PE10:
  11778. case FLASH_5720VENDOR_M_ST_M25PE20:
  11779. case FLASH_5720VENDOR_M_ST_M45PE20:
  11780. case FLASH_5720VENDOR_A_ST_M25PE20:
  11781. case FLASH_5720VENDOR_A_ST_M45PE20:
  11782. case FLASH_5720VENDOR_M_ST_M25PE40:
  11783. case FLASH_5720VENDOR_M_ST_M45PE40:
  11784. case FLASH_5720VENDOR_A_ST_M25PE40:
  11785. case FLASH_5720VENDOR_A_ST_M45PE40:
  11786. case FLASH_5720VENDOR_M_ST_M25PE80:
  11787. case FLASH_5720VENDOR_M_ST_M45PE80:
  11788. case FLASH_5720VENDOR_A_ST_M25PE80:
  11789. case FLASH_5720VENDOR_A_ST_M45PE80:
  11790. case FLASH_5720VENDOR_ST_25USPT:
  11791. case FLASH_5720VENDOR_ST_45USPT:
  11792. tp->nvram_jedecnum = JEDEC_ST;
  11793. tg3_flag_set(tp, NVRAM_BUFFERED);
  11794. tg3_flag_set(tp, FLASH);
  11795. switch (nvmpinstrp) {
  11796. case FLASH_5720VENDOR_M_ST_M25PE20:
  11797. case FLASH_5720VENDOR_M_ST_M45PE20:
  11798. case FLASH_5720VENDOR_A_ST_M25PE20:
  11799. case FLASH_5720VENDOR_A_ST_M45PE20:
  11800. tp->nvram_size = TG3_NVRAM_SIZE_256KB;
  11801. break;
  11802. case FLASH_5720VENDOR_M_ST_M25PE40:
  11803. case FLASH_5720VENDOR_M_ST_M45PE40:
  11804. case FLASH_5720VENDOR_A_ST_M25PE40:
  11805. case FLASH_5720VENDOR_A_ST_M45PE40:
  11806. tp->nvram_size = TG3_NVRAM_SIZE_512KB;
  11807. break;
  11808. case FLASH_5720VENDOR_M_ST_M25PE80:
  11809. case FLASH_5720VENDOR_M_ST_M45PE80:
  11810. case FLASH_5720VENDOR_A_ST_M25PE80:
  11811. case FLASH_5720VENDOR_A_ST_M45PE80:
  11812. tp->nvram_size = TG3_NVRAM_SIZE_1MB;
  11813. break;
  11814. default:
  11815. if (tg3_asic_rev(tp) != ASIC_REV_5762)
  11816. tp->nvram_size = TG3_NVRAM_SIZE_128KB;
  11817. break;
  11818. }
  11819. break;
  11820. default:
  11821. tg3_flag_set(tp, NO_NVRAM);
  11822. return;
  11823. }
  11824. tg3_nvram_get_pagesize(tp, nvcfg1);
  11825. if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
  11826. tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
  11827. if (tg3_asic_rev(tp) == ASIC_REV_5762) {
  11828. u32 val;
  11829. if (tg3_nvram_read(tp, 0, &val))
  11830. return;
  11831. if (val != TG3_EEPROM_MAGIC &&
  11832. (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
  11833. tg3_flag_set(tp, NO_NVRAM);
  11834. }
  11835. }
  11836. /* Chips other than 5700/5701 use the NVRAM for fetching info. */
  11837. static void tg3_nvram_init(struct tg3 *tp)
  11838. {
  11839. if (tg3_flag(tp, IS_SSB_CORE)) {
  11840. /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
  11841. tg3_flag_clear(tp, NVRAM);
  11842. tg3_flag_clear(tp, NVRAM_BUFFERED);
  11843. tg3_flag_set(tp, NO_NVRAM);
  11844. return;
  11845. }
  11846. tw32_f(GRC_EEPROM_ADDR,
  11847. (EEPROM_ADDR_FSM_RESET |
  11848. (EEPROM_DEFAULT_CLOCK_PERIOD <<
  11849. EEPROM_ADDR_CLKPERD_SHIFT)));
  11850. msleep(1);
  11851. /* Enable seeprom accesses. */
  11852. tw32_f(GRC_LOCAL_CTRL,
  11853. tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
  11854. udelay(100);
  11855. if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
  11856. tg3_asic_rev(tp) != ASIC_REV_5701) {
  11857. tg3_flag_set(tp, NVRAM);
  11858. if (tg3_nvram_lock(tp)) {
  11859. netdev_warn(tp->dev,
  11860. "Cannot get nvram lock, %s failed\n",
  11861. __func__);
  11862. return;
  11863. }
  11864. tg3_enable_nvram_access(tp);
  11865. tp->nvram_size = 0;
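		/* Dispatch to the chip-specific NVRAM decode; any ASIC not
		 * matched below falls through to the generic
		 * tg3_get_nvram_info() path.
		 */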
  11866. if (tg3_asic_rev(tp) == ASIC_REV_5752)
  11867. tg3_get_5752_nvram_info(tp);
  11868. else if (tg3_asic_rev(tp) == ASIC_REV_5755)
  11869. tg3_get_5755_nvram_info(tp);
  11870. else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
  11871. tg3_asic_rev(tp) == ASIC_REV_5784 ||
  11872. tg3_asic_rev(tp) == ASIC_REV_5785)
  11873. tg3_get_5787_nvram_info(tp);
  11874. else if (tg3_asic_rev(tp) == ASIC_REV_5761)
  11875. tg3_get_5761_nvram_info(tp);
  11876. else if (tg3_asic_rev(tp) == ASIC_REV_5906)
  11877. tg3_get_5906_nvram_info(tp);
  11878. else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
  11879. tg3_flag(tp, 57765_CLASS))
  11880. tg3_get_57780_nvram_info(tp);
  11881. else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  11882. tg3_asic_rev(tp) == ASIC_REV_5719)
  11883. tg3_get_5717_nvram_info(tp);
  11884. else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
  11885. tg3_asic_rev(tp) == ASIC_REV_5762)
  11886. tg3_get_5720_nvram_info(tp);
  11887. else
  11888. tg3_get_nvram_info(tp);
  11889. if (tp->nvram_size == 0)
  11890. tg3_get_nvram_size(tp);
  11891. tg3_disable_nvram_access(tp);
  11892. tg3_nvram_unlock(tp);
  11893. } else {
  11894. tg3_flag_clear(tp, NVRAM);
  11895. tg3_flag_clear(tp, NVRAM_BUFFERED);
  11896. tg3_get_eeprom_size(tp);
  11897. }
  11898. }
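/* Fallback mapping from PCI subsystem vendor/device IDs to PHY IDs, used
 * by tg3_phy_probe() when neither the MII ID registers nor the EEPROM
 * yield a usable PHY ID.
 */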
  11899. struct subsys_tbl_ent {
  11900. u16 subsys_vendor, subsys_devid;
  11901. u32 phy_id;
  11902. };
  11903. static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
  11904. /* Broadcom boards. */
  11905. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11906. TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
  11907. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11908. TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
  11909. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11910. TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
  11911. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11912. TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
  11913. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11914. TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
  11915. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11916. TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
  11917. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11918. TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
  11919. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11920. TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
  11921. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11922. TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
  11923. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11924. TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
  11925. { TG3PCI_SUBVENDOR_ID_BROADCOM,
  11926. TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
  11927. /* 3com boards. */
  11928. { TG3PCI_SUBVENDOR_ID_3COM,
  11929. TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
  11930. { TG3PCI_SUBVENDOR_ID_3COM,
  11931. TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
  11932. { TG3PCI_SUBVENDOR_ID_3COM,
  11933. TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
  11934. { TG3PCI_SUBVENDOR_ID_3COM,
  11935. TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
  11936. { TG3PCI_SUBVENDOR_ID_3COM,
  11937. TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
  11938. /* DELL boards. */
  11939. { TG3PCI_SUBVENDOR_ID_DELL,
  11940. TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
  11941. { TG3PCI_SUBVENDOR_ID_DELL,
  11942. TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
  11943. { TG3PCI_SUBVENDOR_ID_DELL,
  11944. TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
  11945. { TG3PCI_SUBVENDOR_ID_DELL,
  11946. TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
  11947. /* Compaq boards. */
  11948. { TG3PCI_SUBVENDOR_ID_COMPAQ,
  11949. TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
  11950. { TG3PCI_SUBVENDOR_ID_COMPAQ,
  11951. TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
  11952. { TG3PCI_SUBVENDOR_ID_COMPAQ,
  11953. TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
  11954. { TG3PCI_SUBVENDOR_ID_COMPAQ,
  11955. TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
  11956. { TG3PCI_SUBVENDOR_ID_COMPAQ,
  11957. TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
  11958. /* IBM boards. */
  11959. { TG3PCI_SUBVENDOR_ID_IBM,
  11960. TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
  11961. };
  11962. static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
  11963. {
  11964. int i;
  11965. for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
  11966. if ((subsys_id_to_phy_id[i].subsys_vendor ==
  11967. tp->pdev->subsystem_vendor) &&
  11968. (subsys_id_to_phy_id[i].subsys_devid ==
  11969. tp->pdev->subsystem_device))
  11970. return &subsys_id_to_phy_id[i];
  11971. }
  11972. return NULL;
  11973. }
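/* Read the hardware configuration that bootcode left in shared NIC SRAM
 * (or, on the 5906, in the VCPU shadow register) and translate it into
 * driver state: LED mode, WOL capability, ASF/APE enables, PHY type and
 * related flags.
 */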
  11974. static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
  11975. {
  11976. u32 val;
  11977. tp->phy_id = TG3_PHY_ID_INVALID;
  11978. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  11979. /* Assume an onboard device and WOL capable by default. */
  11980. tg3_flag_set(tp, EEPROM_WRITE_PROT);
  11981. tg3_flag_set(tp, WOL_CAP);
  11982. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  11983. if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
  11984. tg3_flag_clear(tp, EEPROM_WRITE_PROT);
  11985. tg3_flag_set(tp, IS_NIC);
  11986. }
  11987. val = tr32(VCPU_CFGSHDW);
  11988. if (val & VCPU_CFGSHDW_ASPM_DBNC)
  11989. tg3_flag_set(tp, ASPM_WORKAROUND);
  11990. if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
  11991. (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
  11992. tg3_flag_set(tp, WOL_ENABLE);
  11993. device_set_wakeup_enable(&tp->pdev->dev, true);
  11994. }
  11995. goto done;
  11996. }
  11997. tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
  11998. if (val == NIC_SRAM_DATA_SIG_MAGIC) {
  11999. u32 nic_cfg, led_cfg;
  12000. u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
  12001. int eeprom_phy_serdes = 0;
  12002. tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
  12003. tp->nic_sram_data_cfg = nic_cfg;
  12004. tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
  12005. ver >>= NIC_SRAM_DATA_VER_SHIFT;
  12006. if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
  12007. tg3_asic_rev(tp) != ASIC_REV_5701 &&
  12008. tg3_asic_rev(tp) != ASIC_REV_5703 &&
  12009. (ver > 0) && (ver < 0x100))
  12010. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
  12011. if (tg3_asic_rev(tp) == ASIC_REV_5785)
  12012. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
  12013. if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
  12014. NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
  12015. eeprom_phy_serdes = 1;
  12016. tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
  12017. if (nic_phy_id != 0) {
  12018. u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
  12019. u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
  12020. eeprom_phy_id = (id1 >> 16) << 10;
  12021. eeprom_phy_id |= (id2 & 0xfc00) << 16;
  12022. eeprom_phy_id |= (id2 & 0x03ff) << 0;
  12023. } else
  12024. eeprom_phy_id = 0;
  12025. tp->phy_id = eeprom_phy_id;
  12026. if (eeprom_phy_serdes) {
  12027. if (!tg3_flag(tp, 5705_PLUS))
  12028. tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
  12029. else
  12030. tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
  12031. }
  12032. if (tg3_flag(tp, 5750_PLUS))
  12033. led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
  12034. SHASTA_EXT_LED_MODE_MASK);
  12035. else
  12036. led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
  12037. switch (led_cfg) {
  12038. default:
  12039. case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
  12040. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  12041. break;
  12042. case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
  12043. tp->led_ctrl = LED_CTRL_MODE_PHY_2;
  12044. break;
  12045. case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
  12046. tp->led_ctrl = LED_CTRL_MODE_MAC;
  12047. /* Default to PHY_1_MODE if 0 (MAC_MODE) is
  12048. * read on some older 5700/5701 bootcode.
  12049. */
  12050. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  12051. tg3_asic_rev(tp) == ASIC_REV_5701)
  12052. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  12053. break;
  12054. case SHASTA_EXT_LED_SHARED:
  12055. tp->led_ctrl = LED_CTRL_MODE_SHARED;
  12056. if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
  12057. tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
  12058. tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
  12059. LED_CTRL_MODE_PHY_2);
  12060. break;
  12061. case SHASTA_EXT_LED_MAC:
  12062. tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
  12063. break;
  12064. case SHASTA_EXT_LED_COMBO:
  12065. tp->led_ctrl = LED_CTRL_MODE_COMBO;
  12066. if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
  12067. tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
  12068. LED_CTRL_MODE_PHY_2);
  12069. break;
  12070. }
  12071. if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
  12072. tg3_asic_rev(tp) == ASIC_REV_5701) &&
  12073. tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
  12074. tp->led_ctrl = LED_CTRL_MODE_PHY_2;
  12075. if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
  12076. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  12077. if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
  12078. tg3_flag_set(tp, EEPROM_WRITE_PROT);
  12079. if ((tp->pdev->subsystem_vendor ==
  12080. PCI_VENDOR_ID_ARIMA) &&
  12081. (tp->pdev->subsystem_device == 0x205a ||
  12082. tp->pdev->subsystem_device == 0x2063))
  12083. tg3_flag_clear(tp, EEPROM_WRITE_PROT);
  12084. } else {
  12085. tg3_flag_clear(tp, EEPROM_WRITE_PROT);
  12086. tg3_flag_set(tp, IS_NIC);
  12087. }
  12088. if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
  12089. tg3_flag_set(tp, ENABLE_ASF);
  12090. if (tg3_flag(tp, 5750_PLUS))
  12091. tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
  12092. }
  12093. if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
  12094. tg3_flag(tp, 5750_PLUS))
  12095. tg3_flag_set(tp, ENABLE_APE);
  12096. if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
  12097. !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
  12098. tg3_flag_clear(tp, WOL_CAP);
  12099. if (tg3_flag(tp, WOL_CAP) &&
  12100. (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
  12101. tg3_flag_set(tp, WOL_ENABLE);
  12102. device_set_wakeup_enable(&tp->pdev->dev, true);
  12103. }
  12104. if (cfg2 & (1 << 17))
  12105. tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
		/* SerDes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
  12108. if (cfg2 & (1 << 18))
  12109. tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
  12110. if ((tg3_flag(tp, 57765_PLUS) ||
  12111. (tg3_asic_rev(tp) == ASIC_REV_5784 &&
  12112. tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
  12113. (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
  12114. tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
  12115. if (tg3_flag(tp, PCI_EXPRESS)) {
  12116. u32 cfg3;
  12117. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
  12118. if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
  12119. !tg3_flag(tp, 57765_PLUS) &&
  12120. (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
  12121. tg3_flag_set(tp, ASPM_WORKAROUND);
  12122. if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
  12123. tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
  12124. if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
  12125. tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
  12126. }
  12127. if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
  12128. tg3_flag_set(tp, RGMII_INBAND_DISABLE);
  12129. if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
  12130. tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
  12131. if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
  12132. tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
  12133. }
  12134. done:
  12135. if (tg3_flag(tp, WOL_CAP))
  12136. device_set_wakeup_enable(&tp->pdev->dev,
  12137. tg3_flag(tp, WOL_ENABLE));
  12138. else
  12139. device_set_wakeup_capable(&tp->pdev->dev, false);
  12140. }
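/* Read one 32-bit word from the APE OTP region.  The NVRAM lock is held
 * across the access and command completion is polled for roughly 1 ms
 * before giving up with -EBUSY.
 */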
  12141. static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
  12142. {
  12143. int i, err;
  12144. u32 val2, off = offset * 8;
  12145. err = tg3_nvram_lock(tp);
  12146. if (err)
  12147. return err;
  12148. tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
  12149. tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
  12150. APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
  12151. tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
  12152. udelay(10);
  12153. for (i = 0; i < 100; i++) {
  12154. val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
  12155. if (val2 & APE_OTP_STATUS_CMD_DONE) {
  12156. *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
  12157. break;
  12158. }
  12159. udelay(10);
  12160. }
  12161. tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
  12162. tg3_nvram_unlock(tp);
  12163. if (val2 & APE_OTP_STATUS_CMD_DONE)
  12164. return 0;
  12165. return -EBUSY;
  12166. }
  12167. static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
  12168. {
  12169. int i;
  12170. u32 val;
  12171. tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
  12172. tw32(OTP_CTRL, cmd);
  12173. /* Wait for up to 1 ms for command to execute. */
  12174. for (i = 0; i < 100; i++) {
  12175. val = tr32(OTP_STATUS);
  12176. if (val & OTP_STATUS_CMD_DONE)
  12177. break;
  12178. udelay(10);
  12179. }
  12180. return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
  12181. }
  12182. /* Read the gphy configuration from the OTP region of the chip. The gphy
  12183. * configuration is a 32-bit value that straddles the alignment boundary.
  12184. * We do two 32-bit reads and then shift and merge the results.
  12185. */
  12186. static u32 tg3_read_otp_phycfg(struct tg3 *tp)
  12187. {
  12188. u32 bhalf_otp, thalf_otp;
  12189. tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
  12190. if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
  12191. return 0;
  12192. tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
  12193. if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
  12194. return 0;
  12195. thalf_otp = tr32(OTP_READ_DATA);
  12196. tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
  12197. if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
  12198. return 0;
  12199. bhalf_otp = tr32(OTP_READ_DATA);
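	/* The gphy config straddles the two words: its upper half is the low
	 * half of the first read and its lower half is the high half of the
	 * second, e.g. thalf 0xAAAABBBB and bhalf 0xCCCCDDDD yield 0xBBBBCCCC.
	 */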
  12200. return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
  12201. }
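/* Seed the default link configuration: advertise every speed the PHY can
 * support (copper or fibre as appropriate), enable autoneg and mark the
 * current link state as unknown.
 */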
  12202. static void tg3_phy_init_link_config(struct tg3 *tp)
  12203. {
  12204. u32 adv = ADVERTISED_Autoneg;
  12205. if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
  12206. adv |= ADVERTISED_1000baseT_Half |
  12207. ADVERTISED_1000baseT_Full;
  12208. if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  12209. adv |= ADVERTISED_100baseT_Half |
  12210. ADVERTISED_100baseT_Full |
  12211. ADVERTISED_10baseT_Half |
  12212. ADVERTISED_10baseT_Full |
  12213. ADVERTISED_TP;
  12214. else
  12215. adv |= ADVERTISED_FIBRE;
  12216. tp->link_config.advertising = adv;
  12217. tp->link_config.speed = SPEED_UNKNOWN;
  12218. tp->link_config.duplex = DUPLEX_UNKNOWN;
  12219. tp->link_config.autoneg = AUTONEG_ENABLE;
  12220. tp->link_config.active_speed = SPEED_UNKNOWN;
  12221. tp->link_config.active_duplex = DUPLEX_UNKNOWN;
  12222. tp->old_link = -1;
  12223. }
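/* Identify the PHY.  The ID is read from the MII PHYSID registers when it
 * is safe to do so (no ASF/APE firmware contending for the PHY); otherwise
 * the value cached from the EEPROM by tg3_get_eeprom_hw_cfg() is used,
 * with the subsystem-ID table as a last resort.
 */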
  12224. static int tg3_phy_probe(struct tg3 *tp)
  12225. {
  12226. u32 hw_phy_id_1, hw_phy_id_2;
  12227. u32 hw_phy_id, hw_phy_id_masked;
  12228. int err;
  12229. /* flow control autonegotiation is default behavior */
  12230. tg3_flag_set(tp, PAUSE_AUTONEG);
  12231. tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
  12232. if (tg3_flag(tp, ENABLE_APE)) {
  12233. switch (tp->pci_fn) {
  12234. case 0:
  12235. tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
  12236. break;
  12237. case 1:
  12238. tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
  12239. break;
  12240. case 2:
  12241. tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
  12242. break;
  12243. case 3:
  12244. tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
  12245. break;
  12246. }
  12247. }
  12248. if (!tg3_flag(tp, ENABLE_ASF) &&
  12249. !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
  12250. !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
  12251. tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
  12252. TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
  12253. if (tg3_flag(tp, USE_PHYLIB))
  12254. return tg3_phy_init(tp);
  12255. /* Reading the PHY ID register can conflict with ASF
  12256. * firmware access to the PHY hardware.
  12257. */
  12258. err = 0;
  12259. if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
  12260. hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
  12261. } else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded, table-based PHY ID or, failing
		 * that, the value found in the EEPROM area.
		 */
  12267. err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
  12268. err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
  12269. hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
  12270. hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
  12271. hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
  12272. hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
  12273. }
  12274. if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
  12275. tp->phy_id = hw_phy_id;
  12276. if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
  12277. tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
  12278. else
  12279. tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
  12280. } else {
  12281. if (tp->phy_id != TG3_PHY_ID_INVALID) {
  12282. /* Do nothing, phy ID already set up in
  12283. * tg3_get_eeprom_hw_cfg().
  12284. */
  12285. } else {
  12286. struct subsys_tbl_ent *p;
  12287. /* No eeprom signature? Try the hardcoded
  12288. * subsys device table.
  12289. */
  12290. p = tg3_lookup_by_subsys(tp);
  12291. if (p) {
  12292. tp->phy_id = p->phy_id;
  12293. } else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the PHY is supported when it is connected
				 * to an SSB core.
				 */
  12301. return -ENODEV;
  12302. }
  12303. if (!tp->phy_id ||
  12304. tp->phy_id == TG3_PHY_ID_BCM8002)
  12305. tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
  12306. }
  12307. }
  12308. if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
  12309. (tg3_asic_rev(tp) == ASIC_REV_5719 ||
  12310. tg3_asic_rev(tp) == ASIC_REV_5720 ||
  12311. tg3_asic_rev(tp) == ASIC_REV_57766 ||
  12312. tg3_asic_rev(tp) == ASIC_REV_5762 ||
  12313. (tg3_asic_rev(tp) == ASIC_REV_5717 &&
  12314. tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
  12315. (tg3_asic_rev(tp) == ASIC_REV_57765 &&
  12316. tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
  12317. tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
  12318. tg3_phy_init_link_config(tp);
  12319. if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
  12320. !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
  12321. !tg3_flag(tp, ENABLE_APE) &&
  12322. !tg3_flag(tp, ENABLE_ASF)) {
  12323. u32 bmsr, dummy;
  12324. tg3_readphy(tp, MII_BMSR, &bmsr);
  12325. if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
  12326. (bmsr & BMSR_LSTATUS))
  12327. goto skip_phy_reset;
  12328. err = tg3_phy_reset(tp);
  12329. if (err)
  12330. return err;
  12331. tg3_phy_set_wirespeed(tp);
  12332. if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
  12333. tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
  12334. tp->link_config.flowctrl);
  12335. tg3_writephy(tp, MII_BMCR,
  12336. BMCR_ANENABLE | BMCR_ANRESTART);
  12337. }
  12338. }
  12339. skip_phy_reset:
  12340. if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
  12341. err = tg3_init_5401phy_dsp(tp);
  12342. if (err)
  12343. return err;
  12344. err = tg3_init_5401phy_dsp(tp);
  12345. }
  12346. return err;
  12347. }
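/* Extract the board part number (and, on Dell boards, a bootcode version
 * prefix) from the read-only PCI VPD section; if no usable VPD is found,
 * fall back to a name derived from the PCI device ID.
 */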
  12348. static void tg3_read_vpd(struct tg3 *tp)
  12349. {
  12350. u8 *vpd_data;
  12351. unsigned int block_end, rosize, len;
  12352. u32 vpdlen;
  12353. int j, i = 0;
  12354. vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
  12355. if (!vpd_data)
  12356. goto out_no_vpd;
  12357. i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
  12358. if (i < 0)
  12359. goto out_not_found;
  12360. rosize = pci_vpd_lrdt_size(&vpd_data[i]);
  12361. block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
  12362. i += PCI_VPD_LRDT_TAG_SIZE;
  12363. if (block_end > vpdlen)
  12364. goto out_not_found;
  12365. j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
  12366. PCI_VPD_RO_KEYWORD_MFR_ID);
  12367. if (j > 0) {
  12368. len = pci_vpd_info_field_size(&vpd_data[j]);
  12369. j += PCI_VPD_INFO_FLD_HDR_SIZE;
  12370. if (j + len > block_end || len != 4 ||
  12371. memcmp(&vpd_data[j], "1028", 4))
  12372. goto partno;
  12373. j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
  12374. PCI_VPD_RO_KEYWORD_VENDOR0);
  12375. if (j < 0)
  12376. goto partno;
  12377. len = pci_vpd_info_field_size(&vpd_data[j]);
  12378. j += PCI_VPD_INFO_FLD_HDR_SIZE;
  12379. if (j + len > block_end)
  12380. goto partno;
  12381. if (len >= sizeof(tp->fw_ver))
  12382. len = sizeof(tp->fw_ver) - 1;
  12383. memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
  12384. snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
  12385. &vpd_data[j]);
  12386. }
  12387. partno:
  12388. i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
  12389. PCI_VPD_RO_KEYWORD_PARTNO);
  12390. if (i < 0)
  12391. goto out_not_found;
  12392. len = pci_vpd_info_field_size(&vpd_data[i]);
  12393. i += PCI_VPD_INFO_FLD_HDR_SIZE;
  12394. if (len > TG3_BPN_SIZE ||
  12395. (len + i) > vpdlen)
  12396. goto out_not_found;
  12397. memcpy(tp->board_part_number, &vpd_data[i], len);
  12398. out_not_found:
  12399. kfree(vpd_data);
  12400. if (tp->board_part_number[0])
  12401. return;
  12402. out_no_vpd:
  12403. if (tg3_asic_rev(tp) == ASIC_REV_5717) {
  12404. if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
  12405. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
  12406. strcpy(tp->board_part_number, "BCM5717");
  12407. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
  12408. strcpy(tp->board_part_number, "BCM5718");
  12409. else
  12410. goto nomatch;
  12411. } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
  12412. if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
  12413. strcpy(tp->board_part_number, "BCM57780");
  12414. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
  12415. strcpy(tp->board_part_number, "BCM57760");
  12416. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
  12417. strcpy(tp->board_part_number, "BCM57790");
  12418. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
  12419. strcpy(tp->board_part_number, "BCM57788");
  12420. else
  12421. goto nomatch;
  12422. } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
  12423. if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
  12424. strcpy(tp->board_part_number, "BCM57761");
  12425. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
  12426. strcpy(tp->board_part_number, "BCM57765");
  12427. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
  12428. strcpy(tp->board_part_number, "BCM57781");
  12429. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
  12430. strcpy(tp->board_part_number, "BCM57785");
  12431. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
  12432. strcpy(tp->board_part_number, "BCM57791");
  12433. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
  12434. strcpy(tp->board_part_number, "BCM57795");
  12435. else
  12436. goto nomatch;
  12437. } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
  12438. if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
  12439. strcpy(tp->board_part_number, "BCM57762");
  12440. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
  12441. strcpy(tp->board_part_number, "BCM57766");
  12442. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
  12443. strcpy(tp->board_part_number, "BCM57782");
  12444. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
  12445. strcpy(tp->board_part_number, "BCM57786");
  12446. else
  12447. goto nomatch;
  12448. } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  12449. strcpy(tp->board_part_number, "BCM95906");
  12450. } else {
  12451. nomatch:
  12452. strcpy(tp->board_part_number, "none");
  12453. }
  12454. }
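/* A firmware image in NVRAM is treated as valid when its first word has
 * the 0x0c000000 signature in the top six bits and its second word is
 * zero.
 */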
  12455. static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
  12456. {
  12457. u32 val;
  12458. if (tg3_nvram_read(tp, offset, &val) ||
  12459. (val & 0xfc000000) != 0x0c000000 ||
  12460. tg3_nvram_read(tp, offset + 4, &val) ||
  12461. val != 0)
  12462. return 0;
  12463. return 1;
  12464. }
  12465. static void tg3_read_bc_ver(struct tg3 *tp)
  12466. {
  12467. u32 val, offset, start, ver_offset;
  12468. int i, dst_off;
  12469. bool newver = false;
  12470. if (tg3_nvram_read(tp, 0xc, &offset) ||
  12471. tg3_nvram_read(tp, 0x4, &start))
  12472. return;
  12473. offset = tg3_nvram_logical_addr(tp, offset);
  12474. if (tg3_nvram_read(tp, offset, &val))
  12475. return;
  12476. if ((val & 0xfc000000) == 0x0c000000) {
  12477. if (tg3_nvram_read(tp, offset + 4, &val))
  12478. return;
  12479. if (val == 0)
  12480. newver = true;
  12481. }
  12482. dst_off = strlen(tp->fw_ver);
  12483. if (newver) {
  12484. if (TG3_VER_SIZE - dst_off < 16 ||
  12485. tg3_nvram_read(tp, offset + 8, &ver_offset))
  12486. return;
  12487. offset = offset + ver_offset - start;
  12488. for (i = 0; i < 16; i += 4) {
  12489. __be32 v;
  12490. if (tg3_nvram_read_be32(tp, offset + i, &v))
  12491. return;
  12492. memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
  12493. }
  12494. } else {
  12495. u32 major, minor;
  12496. if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
  12497. return;
  12498. major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
  12499. TG3_NVM_BCVER_MAJSFT;
  12500. minor = ver_offset & TG3_NVM_BCVER_MINMSK;
  12501. snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
  12502. "v%d.%02d", major, minor);
  12503. }
  12504. }
  12505. static void tg3_read_hwsb_ver(struct tg3 *tp)
  12506. {
  12507. u32 val, major, minor;
  12508. /* Use native endian representation */
  12509. if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
  12510. return;
  12511. major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
  12512. TG3_NVM_HWSB_CFG1_MAJSFT;
  12513. minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
  12514. TG3_NVM_HWSB_CFG1_MINSFT;
  12515. snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
  12516. }
  12517. static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
  12518. {
  12519. u32 offset, major, minor, build;
  12520. strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
  12521. if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
  12522. return;
  12523. switch (val & TG3_EEPROM_SB_REVISION_MASK) {
  12524. case TG3_EEPROM_SB_REVISION_0:
  12525. offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
  12526. break;
  12527. case TG3_EEPROM_SB_REVISION_2:
  12528. offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
  12529. break;
  12530. case TG3_EEPROM_SB_REVISION_3:
  12531. offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
  12532. break;
  12533. case TG3_EEPROM_SB_REVISION_4:
  12534. offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
  12535. break;
  12536. case TG3_EEPROM_SB_REVISION_5:
  12537. offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
  12538. break;
  12539. case TG3_EEPROM_SB_REVISION_6:
  12540. offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
  12541. break;
  12542. default:
  12543. return;
  12544. }
  12545. if (tg3_nvram_read(tp, offset, &val))
  12546. return;
  12547. build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
  12548. TG3_EEPROM_SB_EDH_BLD_SHFT;
  12549. major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
  12550. TG3_EEPROM_SB_EDH_MAJ_SHFT;
  12551. minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
  12552. if (minor > 99 || build > 26)
  12553. return;
  12554. offset = strlen(tp->fw_ver);
  12555. snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
  12556. " v%d.%02d", major, minor);
  12557. if (build > 0) {
  12558. offset = strlen(tp->fw_ver);
  12559. if (offset < TG3_VER_SIZE - 1)
  12560. tp->fw_ver[offset] = 'a' + build - 1;
  12561. }
  12562. }
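/* Scan the NVRAM directory for the ASF initialization entry and, if the
 * firmware image it points at looks valid, append that image's version
 * string to tp->fw_ver.
 */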
  12563. static void tg3_read_mgmtfw_ver(struct tg3 *tp)
  12564. {
  12565. u32 val, offset, start;
  12566. int i, vlen;
  12567. for (offset = TG3_NVM_DIR_START;
  12568. offset < TG3_NVM_DIR_END;
  12569. offset += TG3_NVM_DIRENT_SIZE) {
  12570. if (tg3_nvram_read(tp, offset, &val))
  12571. return;
  12572. if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
  12573. break;
  12574. }
  12575. if (offset == TG3_NVM_DIR_END)
  12576. return;
  12577. if (!tg3_flag(tp, 5705_PLUS))
  12578. start = 0x08000000;
  12579. else if (tg3_nvram_read(tp, offset - 4, &start))
  12580. return;
  12581. if (tg3_nvram_read(tp, offset + 4, &offset) ||
  12582. !tg3_fw_img_is_valid(tp, offset) ||
  12583. tg3_nvram_read(tp, offset + 8, &val))
  12584. return;
  12585. offset += val - start;
  12586. vlen = strlen(tp->fw_ver);
  12587. tp->fw_ver[vlen++] = ',';
  12588. tp->fw_ver[vlen++] = ' ';
  12589. for (i = 0; i < 4; i++) {
  12590. __be32 v;
  12591. if (tg3_nvram_read_be32(tp, offset, &v))
  12592. return;
  12593. offset += sizeof(v);
  12594. if (vlen > TG3_VER_SIZE - sizeof(v)) {
  12595. memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
  12596. break;
  12597. }
  12598. memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
  12599. vlen += sizeof(v);
  12600. }
  12601. }
  12602. static void tg3_probe_ncsi(struct tg3 *tp)
  12603. {
  12604. u32 apedata;
  12605. apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
  12606. if (apedata != APE_SEG_SIG_MAGIC)
  12607. return;
  12608. apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
  12609. if (!(apedata & APE_FW_STATUS_READY))
  12610. return;
  12611. if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
  12612. tg3_flag_set(tp, APE_HAS_NCSI);
  12613. }
  12614. static void tg3_read_dash_ver(struct tg3 *tp)
  12615. {
  12616. int vlen;
  12617. u32 apedata;
  12618. char *fwtype;
  12619. apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
  12620. if (tg3_flag(tp, APE_HAS_NCSI))
  12621. fwtype = "NCSI";
  12622. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
  12623. fwtype = "SMASH";
  12624. else
  12625. fwtype = "DASH";
  12626. vlen = strlen(tp->fw_ver);
  12627. snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
  12628. fwtype,
  12629. (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
  12630. (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
  12631. (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
  12632. (apedata & APE_FW_VERSION_BLDMSK));
  12633. }
  12634. static void tg3_read_otp_ver(struct tg3 *tp)
  12635. {
  12636. u32 val, val2;
  12637. if (tg3_asic_rev(tp) != ASIC_REV_5762)
  12638. return;
  12639. if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
  12640. !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
  12641. TG3_OTP_MAGIC0_VALID(val)) {
  12642. u64 val64 = (u64) val << 32 | val2;
  12643. u32 ver = 0;
  12644. int i, vlen;
  12645. for (i = 0; i < 7; i++) {
  12646. if ((val64 & 0xff) == 0)
  12647. break;
  12648. ver = val64 & 0xff;
  12649. val64 >>= 8;
  12650. }
  12651. vlen = strlen(tp->fw_ver);
  12652. snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
  12653. }
  12654. }
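/* Assemble tp->fw_ver: bootcode or self-boot version first, then any
 * management firmware version (NCSI/SMASH/DASH via the APE, or the legacy
 * ASF image), unless a version string was already taken from the VPD.
 */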
  12655. static void tg3_read_fw_ver(struct tg3 *tp)
  12656. {
  12657. u32 val;
  12658. bool vpd_vers = false;
  12659. if (tp->fw_ver[0] != 0)
  12660. vpd_vers = true;
  12661. if (tg3_flag(tp, NO_NVRAM)) {
  12662. strcat(tp->fw_ver, "sb");
  12663. tg3_read_otp_ver(tp);
  12664. return;
  12665. }
  12666. if (tg3_nvram_read(tp, 0, &val))
  12667. return;
  12668. if (val == TG3_EEPROM_MAGIC)
  12669. tg3_read_bc_ver(tp);
  12670. else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
  12671. tg3_read_sb_ver(tp, val);
  12672. else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
  12673. tg3_read_hwsb_ver(tp);
  12674. if (tg3_flag(tp, ENABLE_ASF)) {
  12675. if (tg3_flag(tp, ENABLE_APE)) {
  12676. tg3_probe_ncsi(tp);
  12677. if (!vpd_vers)
  12678. tg3_read_dash_ver(tp);
  12679. } else if (!vpd_vers) {
  12680. tg3_read_mgmtfw_ver(tp);
  12681. }
  12682. }
  12683. tp->fw_ver[TG3_VER_SIZE - 1] = 0;
  12684. }
  12685. static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
  12686. {
  12687. if (tg3_flag(tp, LRG_PROD_RING_CAP))
  12688. return TG3_RX_RET_MAX_SIZE_5717;
  12689. else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
  12690. return TG3_RX_RET_MAX_SIZE_5700;
  12691. else
  12692. return TG3_RX_RET_MAX_SIZE_5705;
  12693. }
  12694. static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
  12695. { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
  12696. { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
  12697. { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
  12698. { },
  12699. };
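/* On dual-function devices (5704/5714 class) locate the other PCI function
 * in the same package so the two ports can coordinate; a single-port 5704
 * simply gets tp->pdev back as its own peer.
 */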
  12700. static struct pci_dev *tg3_find_peer(struct tg3 *tp)
  12701. {
  12702. struct pci_dev *peer;
  12703. unsigned int func, devnr = tp->pdev->devfn & ~7;
  12704. for (func = 0; func < 8; func++) {
  12705. peer = pci_get_slot(tp->pdev->bus, devnr | func);
  12706. if (peer && peer != tp->pdev)
  12707. break;
  12708. pci_dev_put(peer);
  12709. }
	/* 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
  12713. if (!peer) {
  12714. peer = tp->pdev;
  12715. return peer;
  12716. }
  12717. /*
  12718. * We don't need to keep the refcount elevated; there's no way
  12719. * to remove one half of this device without removing the other
  12720. */
  12721. pci_dev_put(peer);
  12722. return peer;
  12723. }
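/* Work out the chip revision ID.  Newer parts report a placeholder in the
 * MISC_HOST_CTRL field and expose the real value in a product-ID config
 * register instead; the per-generation capability flags (5705_PLUS,
 * 5750_PLUS, 5755_PLUS, 57765_PLUS, ...) are then derived from it.
 */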
  12724. static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
  12725. {
  12726. tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
  12727. if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
  12728. u32 reg;
  12729. /* All devices that use the alternate
  12730. * ASIC REV location have a CPMU.
  12731. */
  12732. tg3_flag_set(tp, CPMU_PRESENT);
  12733. if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
  12734. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
  12735. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
  12736. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
  12737. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
  12738. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
  12739. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
  12740. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
  12741. reg = TG3PCI_GEN2_PRODID_ASICREV;
  12742. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
  12743. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
  12744. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
  12745. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
  12746. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
  12747. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
  12748. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
  12749. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
  12750. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
  12751. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
  12752. reg = TG3PCI_GEN15_PRODID_ASICREV;
  12753. else
  12754. reg = TG3PCI_PRODID_ASICREV;
  12755. pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
  12756. }
  12757. /* Wrong chip ID in 5752 A0. This code can be removed later
  12758. * as A0 is not in production.
  12759. */
  12760. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
  12761. tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
  12762. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
  12763. tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
  12764. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  12765. tg3_asic_rev(tp) == ASIC_REV_5719 ||
  12766. tg3_asic_rev(tp) == ASIC_REV_5720)
  12767. tg3_flag_set(tp, 5717_PLUS);
  12768. if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
  12769. tg3_asic_rev(tp) == ASIC_REV_57766)
  12770. tg3_flag_set(tp, 57765_CLASS);
  12771. if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
  12772. tg3_asic_rev(tp) == ASIC_REV_5762)
  12773. tg3_flag_set(tp, 57765_PLUS);
  12774. /* Intentionally exclude ASIC_REV_5906 */
  12775. if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
  12776. tg3_asic_rev(tp) == ASIC_REV_5787 ||
  12777. tg3_asic_rev(tp) == ASIC_REV_5784 ||
  12778. tg3_asic_rev(tp) == ASIC_REV_5761 ||
  12779. tg3_asic_rev(tp) == ASIC_REV_5785 ||
  12780. tg3_asic_rev(tp) == ASIC_REV_57780 ||
  12781. tg3_flag(tp, 57765_PLUS))
  12782. tg3_flag_set(tp, 5755_PLUS);
  12783. if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
  12784. tg3_asic_rev(tp) == ASIC_REV_5714)
  12785. tg3_flag_set(tp, 5780_CLASS);
  12786. if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
  12787. tg3_asic_rev(tp) == ASIC_REV_5752 ||
  12788. tg3_asic_rev(tp) == ASIC_REV_5906 ||
  12789. tg3_flag(tp, 5755_PLUS) ||
  12790. tg3_flag(tp, 5780_CLASS))
  12791. tg3_flag_set(tp, 5750_PLUS);
  12792. if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
  12793. tg3_flag(tp, 5750_PLUS))
  12794. tg3_flag_set(tp, 5705_PLUS);
  12795. }
  12796. static bool tg3_10_100_only_device(struct tg3 *tp,
  12797. const struct pci_device_id *ent)
  12798. {
  12799. u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
  12800. if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
  12801. (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
  12802. (tp->phy_flags & TG3_PHYFLG_IS_FET))
  12803. return true;
  12804. if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
  12805. if (tg3_asic_rev(tp) == ASIC_REV_5705) {
  12806. if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
  12807. return true;
  12808. } else {
  12809. return true;
  12810. }
  12811. }
  12812. return false;
  12813. }
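/* One-time probe-path discovery of chip quirks and capabilities: bus type,
 * DMA and mailbox workarounds, register access methods, TSO support, IRQ
 * limits, and the EEPROM-derived configuration.
 */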
  12814. static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
  12815. {
  12816. u32 misc_ctrl_reg;
  12817. u32 pci_state_reg, grc_misc_cfg;
  12818. u32 val;
  12819. u16 pci_cmd;
  12820. int err;
	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never gets
	 * used.  This seems to suggest that the workaround is insufficient.
	 */
  12828. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  12829. pci_cmd &= ~PCI_COMMAND_INVALIDATE;
  12830. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  12831. /* Important! -- Make sure register accesses are byteswapped
  12832. * correctly. Also, for those chips that require it, make
  12833. * sure that indirect register accesses are enabled before
  12834. * the first operation.
  12835. */
  12836. pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  12837. &misc_ctrl_reg);
  12838. tp->misc_host_ctrl |= (misc_ctrl_reg &
  12839. MISC_HOST_CTRL_CHIPREV);
  12840. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  12841. tp->misc_host_ctrl);
  12842. tg3_detect_asic_rev(tp, misc_ctrl_reg);
  12843. /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
  12844. * we need to disable memory and use config. cycles
  12845. * only to access all registers. The 5702/03 chips
  12846. * can mistakenly decode the special cycles from the
  12847. * ICH chipsets as memory write cycles, causing corruption
  12848. * of register and memory space. Only certain ICH bridges
  12849. * will drive special cycles with non-zero data during the
  12850. * address phase which can fall within the 5703's address
  12851. * range. This is not an ICH bug as the PCI spec allows
  12852. * non-zero address during special cycles. However, only
  12853. * these ICH bridges are known to drive non-zero addresses
  12854. * during special cycles.
  12855. *
  12856. * Since special cycles do not cross PCI bridges, we only
  12857. * enable this workaround if the 5703 is on the secondary
  12858. * bus of these ICH bridges.
  12859. */
  12860. if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
  12861. (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
  12862. static struct tg3_dev_id {
  12863. u32 vendor;
  12864. u32 device;
  12865. u32 rev;
  12866. } ich_chipsets[] = {
  12867. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
  12868. PCI_ANY_ID },
  12869. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
  12870. PCI_ANY_ID },
  12871. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
  12872. 0xa },
  12873. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
  12874. PCI_ANY_ID },
  12875. { },
  12876. };
  12877. struct tg3_dev_id *pci_id = &ich_chipsets[0];
  12878. struct pci_dev *bridge = NULL;
  12879. while (pci_id->vendor != 0) {
  12880. bridge = pci_get_device(pci_id->vendor, pci_id->device,
  12881. bridge);
  12882. if (!bridge) {
  12883. pci_id++;
  12884. continue;
  12885. }
  12886. if (pci_id->rev != PCI_ANY_ID) {
  12887. if (bridge->revision > pci_id->rev)
  12888. continue;
  12889. }
  12890. if (bridge->subordinate &&
  12891. (bridge->subordinate->number ==
  12892. tp->pdev->bus->number)) {
  12893. tg3_flag_set(tp, ICH_WORKAROUND);
  12894. pci_dev_put(bridge);
  12895. break;
  12896. }
  12897. }
  12898. }
  12899. if (tg3_asic_rev(tp) == ASIC_REV_5701) {
  12900. static struct tg3_dev_id {
  12901. u32 vendor;
  12902. u32 device;
  12903. } bridge_chipsets[] = {
  12904. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
  12905. { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
  12906. { },
  12907. };
  12908. struct tg3_dev_id *pci_id = &bridge_chipsets[0];
  12909. struct pci_dev *bridge = NULL;
  12910. while (pci_id->vendor != 0) {
  12911. bridge = pci_get_device(pci_id->vendor,
  12912. pci_id->device,
  12913. bridge);
  12914. if (!bridge) {
  12915. pci_id++;
  12916. continue;
  12917. }
  12918. if (bridge->subordinate &&
  12919. (bridge->subordinate->number <=
  12920. tp->pdev->bus->number) &&
  12921. (bridge->subordinate->busn_res.end >=
  12922. tp->pdev->bus->number)) {
  12923. tg3_flag_set(tp, 5701_DMA_BUG);
  12924. pci_dev_put(bridge);
  12925. break;
  12926. }
  12927. }
  12928. }
	/* The EPB bridge inside the 5714, 5715, and 5780 cannot support
	 * DMA addresses wider than 40 bits.  This bridge may have other
	 * 57xx devices behind it in some 4-port NIC designs, for example,
	 * and any tg3 device found behind the bridge will also need the
	 * 40-bit DMA workaround.
	 */
  12935. if (tg3_flag(tp, 5780_CLASS)) {
  12936. tg3_flag_set(tp, 40BIT_DMA_BUG);
  12937. tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
  12938. } else {
  12939. struct pci_dev *bridge = NULL;
  12940. do {
  12941. bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
  12942. PCI_DEVICE_ID_SERVERWORKS_EPB,
  12943. bridge);
  12944. if (bridge && bridge->subordinate &&
  12945. (bridge->subordinate->number <=
  12946. tp->pdev->bus->number) &&
  12947. (bridge->subordinate->busn_res.end >=
  12948. tp->pdev->bus->number)) {
  12949. tg3_flag_set(tp, 40BIT_DMA_BUG);
  12950. pci_dev_put(bridge);
  12951. break;
  12952. }
  12953. } while (bridge);
  12954. }
  12955. if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
  12956. tg3_asic_rev(tp) == ASIC_REV_5714)
  12957. tp->pdev_peer = tg3_find_peer(tp);
  12958. /* Determine TSO capabilities */
  12959. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
  12960. ; /* Do nothing. HW bug. */
  12961. else if (tg3_flag(tp, 57765_PLUS))
  12962. tg3_flag_set(tp, HW_TSO_3);
  12963. else if (tg3_flag(tp, 5755_PLUS) ||
  12964. tg3_asic_rev(tp) == ASIC_REV_5906)
  12965. tg3_flag_set(tp, HW_TSO_2);
  12966. else if (tg3_flag(tp, 5750_PLUS)) {
  12967. tg3_flag_set(tp, HW_TSO_1);
  12968. tg3_flag_set(tp, TSO_BUG);
  12969. if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
  12970. tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
  12971. tg3_flag_clear(tp, TSO_BUG);
  12972. } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
  12973. tg3_asic_rev(tp) != ASIC_REV_5701 &&
  12974. tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
  12975. tg3_flag_set(tp, FW_TSO);
  12976. tg3_flag_set(tp, TSO_BUG);
  12977. if (tg3_asic_rev(tp) == ASIC_REV_5705)
  12978. tp->fw_needed = FIRMWARE_TG3TSO5;
  12979. else
  12980. tp->fw_needed = FIRMWARE_TG3TSO;
  12981. }
  12982. /* Selectively allow TSO based on operating conditions */
  12983. if (tg3_flag(tp, HW_TSO_1) ||
  12984. tg3_flag(tp, HW_TSO_2) ||
  12985. tg3_flag(tp, HW_TSO_3) ||
  12986. tg3_flag(tp, FW_TSO)) {
  12987. /* For firmware TSO, assume ASF is disabled.
  12988. * We'll disable TSO later if we discover ASF
  12989. * is enabled in tg3_get_eeprom_hw_cfg().
  12990. */
  12991. tg3_flag_set(tp, TSO_CAPABLE);
  12992. } else {
  12993. tg3_flag_clear(tp, TSO_CAPABLE);
  12994. tg3_flag_clear(tp, TSO_BUG);
  12995. tp->fw_needed = NULL;
  12996. }
  12997. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
  12998. tp->fw_needed = FIRMWARE_TG3;
  12999. if (tg3_asic_rev(tp) == ASIC_REV_57766)
  13000. tp->fw_needed = FIRMWARE_TG357766;
  13001. tp->irq_max = 1;
  13002. if (tg3_flag(tp, 5750_PLUS)) {
  13003. tg3_flag_set(tp, SUPPORT_MSI);
  13004. if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
  13005. tg3_chip_rev(tp) == CHIPREV_5750_BX ||
  13006. (tg3_asic_rev(tp) == ASIC_REV_5714 &&
  13007. tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
  13008. tp->pdev_peer == tp->pdev))
  13009. tg3_flag_clear(tp, SUPPORT_MSI);
  13010. if (tg3_flag(tp, 5755_PLUS) ||
  13011. tg3_asic_rev(tp) == ASIC_REV_5906) {
  13012. tg3_flag_set(tp, 1SHOT_MSI);
  13013. }
  13014. if (tg3_flag(tp, 57765_PLUS)) {
  13015. tg3_flag_set(tp, SUPPORT_MSIX);
  13016. tp->irq_max = TG3_IRQ_MAX_VECS;
  13017. }
  13018. }
  13019. tp->txq_max = 1;
  13020. tp->rxq_max = 1;
  13021. if (tp->irq_max > 1) {
  13022. tp->rxq_max = TG3_RSS_MAX_NUM_QS;
  13023. tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
  13024. if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
  13025. tg3_asic_rev(tp) == ASIC_REV_5720)
  13026. tp->txq_max = tp->irq_max - 1;
  13027. }
  13028. if (tg3_flag(tp, 5755_PLUS) ||
  13029. tg3_asic_rev(tp) == ASIC_REV_5906)
  13030. tg3_flag_set(tp, SHORT_DMA_BUG);
  13031. if (tg3_asic_rev(tp) == ASIC_REV_5719)
  13032. tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
  13033. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  13034. tg3_asic_rev(tp) == ASIC_REV_5719 ||
  13035. tg3_asic_rev(tp) == ASIC_REV_5720 ||
  13036. tg3_asic_rev(tp) == ASIC_REV_5762)
  13037. tg3_flag_set(tp, LRG_PROD_RING_CAP);
  13038. if (tg3_flag(tp, 57765_PLUS) &&
  13039. tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
  13040. tg3_flag_set(tp, USE_JUMBO_BDFLAG);
  13041. if (!tg3_flag(tp, 5705_PLUS) ||
  13042. tg3_flag(tp, 5780_CLASS) ||
  13043. tg3_flag(tp, USE_JUMBO_BDFLAG))
  13044. tg3_flag_set(tp, JUMBO_CAPABLE);
  13045. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
  13046. &pci_state_reg);
  13047. if (pci_is_pcie(tp->pdev)) {
  13048. u16 lnkctl;
  13049. tg3_flag_set(tp, PCI_EXPRESS);
  13050. pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
  13051. if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
  13052. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  13053. tg3_flag_clear(tp, HW_TSO_2);
  13054. tg3_flag_clear(tp, TSO_CAPABLE);
  13055. }
  13056. if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
  13057. tg3_asic_rev(tp) == ASIC_REV_5761 ||
  13058. tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
  13059. tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
  13060. tg3_flag_set(tp, CLKREQ_BUG);
  13061. } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
  13062. tg3_flag_set(tp, L1PLLPD_EN);
  13063. }
  13064. } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
  13065. /* BCM5785 devices are effectively PCIe devices, and should
  13066. * follow PCIe codepaths, but do not have a PCIe capabilities
  13067. * section.
  13068. */
  13069. tg3_flag_set(tp, PCI_EXPRESS);
  13070. } else if (!tg3_flag(tp, 5705_PLUS) ||
  13071. tg3_flag(tp, 5780_CLASS)) {
  13072. tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
  13073. if (!tp->pcix_cap) {
  13074. dev_err(&tp->pdev->dev,
  13075. "Cannot find PCI-X capability, aborting\n");
  13076. return -EIO;
  13077. }
  13078. if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
  13079. tg3_flag_set(tp, PCIX_MODE);
  13080. }
  13081. /* If we have an AMD 762 or VIA K8T800 chipset, write
  13082. * reordering to the mailbox registers done by the host
  13083. * controller can cause major troubles. We read back from
  13084. * every mailbox register write to force the writes to be
  13085. * posted to the chip in order.
  13086. */
  13087. if (pci_dev_present(tg3_write_reorder_chipsets) &&
  13088. !tg3_flag(tp, PCI_EXPRESS))
  13089. tg3_flag_set(tp, MBOX_WRITE_REORDER);
  13090. pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
  13091. &tp->pci_cacheline_sz);
  13092. pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  13093. &tp->pci_lat_timer);
  13094. if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
  13095. tp->pci_lat_timer < 64) {
  13096. tp->pci_lat_timer = 64;
  13097. pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  13098. tp->pci_lat_timer);
  13099. }
  13100. /* Important! -- It is critical that the PCI-X hw workaround
  13101. * situation is decided before the first MMIO register access.
  13102. */
  13103. if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
  13104. /* 5700 BX chips need to have their TX producer index
  13105. * mailboxes written twice to workaround a bug.
  13106. */
  13107. tg3_flag_set(tp, TXD_MBOX_HWBUG);
  13108. /* If we are in PCI-X mode, enable register write workaround.
  13109. *
  13110. * The workaround is to use indirect register accesses
  13111. * for all chip writes not to mailbox registers.
  13112. */
  13113. if (tg3_flag(tp, PCIX_MODE)) {
  13114. u32 pm_reg;
  13115. tg3_flag_set(tp, PCIX_TARGET_HWBUG);
			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
  13120. pci_read_config_dword(tp->pdev,
  13121. tp->pm_cap + PCI_PM_CTRL,
  13122. &pm_reg);
  13123. pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
  13124. pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
  13125. pci_write_config_dword(tp->pdev,
  13126. tp->pm_cap + PCI_PM_CTRL,
  13127. pm_reg);
  13128. /* Also, force SERR#/PERR# in PCI command. */
  13129. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  13130. pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
  13131. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  13132. }
  13133. }
  13134. if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
  13135. tg3_flag_set(tp, PCI_HIGH_SPEED);
  13136. if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
  13137. tg3_flag_set(tp, PCI_32BIT);
  13138. /* Chip-specific fixup from Broadcom driver */
  13139. if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
  13140. (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
  13141. pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
  13142. pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
  13143. }
  13144. /* Default fast path register access methods */
  13145. tp->read32 = tg3_read32;
  13146. tp->write32 = tg3_write32;
  13147. tp->read32_mbox = tg3_read32;
  13148. tp->write32_mbox = tg3_write32;
  13149. tp->write32_tx_mbox = tg3_write32;
  13150. tp->write32_rx_mbox = tg3_write32;
  13151. /* Various workaround register access methods */
  13152. if (tg3_flag(tp, PCIX_TARGET_HWBUG))
  13153. tp->write32 = tg3_write_indirect_reg32;
  13154. else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
  13155. (tg3_flag(tp, PCI_EXPRESS) &&
  13156. tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
  13157. /*
  13158. * Back to back register writes can cause problems on these
  13159. * chips, the workaround is to read back all reg writes
  13160. * except those to mailbox regs.
  13161. *
  13162. * See tg3_write_indirect_reg32().
  13163. */
  13164. tp->write32 = tg3_write_flush_reg32;
  13165. }
  13166. if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
  13167. tp->write32_tx_mbox = tg3_write32_tx_mbox;
  13168. if (tg3_flag(tp, MBOX_WRITE_REORDER))
  13169. tp->write32_rx_mbox = tg3_write_flush_reg32;
  13170. }
  13171. if (tg3_flag(tp, ICH_WORKAROUND)) {
  13172. tp->read32 = tg3_read_indirect_reg32;
  13173. tp->write32 = tg3_write_indirect_reg32;
  13174. tp->read32_mbox = tg3_read_indirect_mbox;
  13175. tp->write32_mbox = tg3_write_indirect_mbox;
  13176. tp->write32_tx_mbox = tg3_write_indirect_mbox;
  13177. tp->write32_rx_mbox = tg3_write_indirect_mbox;
  13178. iounmap(tp->regs);
  13179. tp->regs = NULL;
  13180. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  13181. pci_cmd &= ~PCI_COMMAND_MEMORY;
  13182. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  13183. }
  13184. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  13185. tp->read32_mbox = tg3_read32_mbox_5906;
  13186. tp->write32_mbox = tg3_write32_mbox_5906;
  13187. tp->write32_tx_mbox = tg3_write32_mbox_5906;
  13188. tp->write32_rx_mbox = tg3_write32_mbox_5906;
  13189. }
  13190. if (tp->write32 == tg3_write_indirect_reg32 ||
  13191. (tg3_flag(tp, PCIX_MODE) &&
  13192. (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  13193. tg3_asic_rev(tp) == ASIC_REV_5701)))
  13194. tg3_flag_set(tp, SRAM_USE_CONFIG);
  13195. /* The memory arbiter has to be enabled in order for SRAM accesses
  13196. * to succeed. Normally on powerup the tg3 chip firmware will make
  13197. * sure it is enabled, but other entities such as system netboot
  13198. * code might disable it.
  13199. */
  13200. val = tr32(MEMARB_MODE);
  13201. tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
  13202. tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
  13203. if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
  13204. tg3_flag(tp, 5780_CLASS)) {
  13205. if (tg3_flag(tp, PCIX_MODE)) {
  13206. pci_read_config_dword(tp->pdev,
  13207. tp->pcix_cap + PCI_X_STATUS,
  13208. &val);
  13209. tp->pci_fn = val & 0x7;
  13210. }
  13211. } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  13212. tg3_asic_rev(tp) == ASIC_REV_5719 ||
  13213. tg3_asic_rev(tp) == ASIC_REV_5720) {
  13214. tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
  13215. if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
  13216. val = tr32(TG3_CPMU_STATUS);
  13217. if (tg3_asic_rev(tp) == ASIC_REV_5717)
  13218. tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
  13219. else
  13220. tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
  13221. TG3_CPMU_STATUS_FSHFT_5719;
  13222. }
  13223. if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
  13224. tp->write32_tx_mbox = tg3_write_flush_reg32;
  13225. tp->write32_rx_mbox = tg3_write_flush_reg32;
  13226. }
  13227. /* Get eeprom hw config before calling tg3_set_power_state().
  13228. * In particular, the TG3_FLAG_IS_NIC flag must be
  13229. * determined before calling tg3_set_power_state() so that
  13230. * we know whether or not to switch out of Vaux power.
  13231. * When the flag is set, it means that GPIO1 is used for eeprom
  13232. * write protect and also implies that it is a LOM where GPIOs
  13233. * are not used to switch power.
  13234. */
  13235. tg3_get_eeprom_hw_cfg(tp);
  13236. if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
  13237. tg3_flag_clear(tp, TSO_CAPABLE);
  13238. tg3_flag_clear(tp, TSO_BUG);
  13239. tp->fw_needed = NULL;
  13240. }
  13241. if (tg3_flag(tp, ENABLE_APE)) {
  13242. /* Allow reads and writes to the
  13243. * APE register and memory space.
  13244. */
  13245. pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
  13246. PCISTATE_ALLOW_APE_SHMEM_WR |
  13247. PCISTATE_ALLOW_APE_PSPACE_WR;
  13248. pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
  13249. pci_state_reg);
  13250. tg3_ape_lock_init(tp);
  13251. }
  13252. /* Set up tp->grc_local_ctrl before calling
  13253. * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
  13254. * will bring 5700's external PHY out of reset.
  13255. * It is also used as eeprom write protect on LOMs.
  13256. */
  13257. tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
  13258. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  13259. tg3_flag(tp, EEPROM_WRITE_PROT))
  13260. tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
  13261. GRC_LCLCTRL_GPIO_OUTPUT1);
  13262. /* Unused GPIO3 must be driven as output on 5752 because there
  13263. * are no pull-up resistors on unused GPIO pins.
  13264. */
  13265. else if (tg3_asic_rev(tp) == ASIC_REV_5752)
  13266. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
  13267. if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
  13268. tg3_asic_rev(tp) == ASIC_REV_57780 ||
  13269. tg3_flag(tp, 57765_CLASS))
  13270. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
  13271. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
  13272. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
  13273. /* Turn off the debug UART. */
  13274. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
  13275. if (tg3_flag(tp, IS_NIC))
  13276. /* Keep VMain power. */
  13277. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
  13278. GRC_LCLCTRL_GPIO_OUTPUT0;
  13279. }
  13280. if (tg3_asic_rev(tp) == ASIC_REV_5762)
  13281. tp->grc_local_ctrl |=
  13282. tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
  13283. /* Switch out of Vaux if it is a NIC */
  13284. tg3_pwrsrc_switch_to_vmain(tp);
  13285. /* Derive initial jumbo mode from MTU assigned in
  13286. * ether_setup() via the alloc_etherdev() call
  13287. */
  13288. if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
  13289. tg3_flag_set(tp, JUMBO_RING_ENABLE);
  13290. /* Determine WakeOnLan speed to use. */
  13291. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  13292. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
  13293. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
  13294. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
  13295. tg3_flag_clear(tp, WOL_SPEED_100MB);
  13296. } else {
  13297. tg3_flag_set(tp, WOL_SPEED_100MB);
  13298. }
  13299. if (tg3_asic_rev(tp) == ASIC_REV_5906)
  13300. tp->phy_flags |= TG3_PHYFLG_IS_FET;
  13301. /* A few boards don't want Ethernet@WireSpeed phy feature */
  13302. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  13303. (tg3_asic_rev(tp) == ASIC_REV_5705 &&
  13304. (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
  13305. (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
  13306. (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
  13307. (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  13308. tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
  13309. if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
  13310. tg3_chip_rev(tp) == CHIPREV_5704_AX)
  13311. tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
  13312. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
  13313. tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
  13314. if (tg3_flag(tp, 5705_PLUS) &&
  13315. !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
  13316. tg3_asic_rev(tp) != ASIC_REV_5785 &&
  13317. tg3_asic_rev(tp) != ASIC_REV_57780 &&
  13318. !tg3_flag(tp, 57765_PLUS)) {
  13319. if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
  13320. tg3_asic_rev(tp) == ASIC_REV_5787 ||
  13321. tg3_asic_rev(tp) == ASIC_REV_5784 ||
  13322. tg3_asic_rev(tp) == ASIC_REV_5761) {
  13323. if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
  13324. tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
  13325. tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
  13326. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
  13327. tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
  13328. } else
  13329. tp->phy_flags |= TG3_PHYFLG_BER_BUG;
  13330. }
  13331. if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
  13332. tg3_chip_rev(tp) != CHIPREV_5784_AX) {
  13333. tp->phy_otp = tg3_read_otp_phycfg(tp);
  13334. if (tp->phy_otp == 0)
  13335. tp->phy_otp = TG3_OTP_DEFAULT;
  13336. }
  13337. if (tg3_flag(tp, CPMU_PRESENT))
  13338. tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
  13339. else
  13340. tp->mi_mode = MAC_MI_MODE_BASE;
  13341. tp->coalesce_mode = 0;
  13342. if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
  13343. tg3_chip_rev(tp) != CHIPREV_5700_BX)
  13344. tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
  13345. /* Set these bits to enable statistics workaround. */
  13346. if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
  13347. tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
  13348. tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
  13349. tp->coalesce_mode |= HOSTCC_MODE_ATTN;
  13350. tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
  13351. }
  13352. if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
  13353. tg3_asic_rev(tp) == ASIC_REV_57780)
  13354. tg3_flag_set(tp, USE_PHYLIB);
  13355. err = tg3_mdio_init(tp);
  13356. if (err)
  13357. return err;
  13358. /* Initialize data/descriptor byte/word swapping. */
  13359. val = tr32(GRC_MODE);
  13360. if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
  13361. tg3_asic_rev(tp) == ASIC_REV_5762)
  13362. val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
  13363. GRC_MODE_WORD_SWAP_B2HRX_DATA |
  13364. GRC_MODE_B2HRX_ENABLE |
  13365. GRC_MODE_HTX2B_ENABLE |
  13366. GRC_MODE_HOST_STACKUP);
  13367. else
  13368. val &= GRC_MODE_HOST_STACKUP;
  13369. tw32(GRC_MODE, val | tp->grc_mode);
  13370. tg3_switch_clocks(tp);
  13371. /* Clear this out for sanity. */
  13372. tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  13373. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
  13374. &pci_state_reg);
  13375. if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
  13376. !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
  13377. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
  13378. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
  13379. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
  13380. tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
  13381. void __iomem *sram_base;
  13382. /* Write some dummy words into the SRAM status block
  13383. * area, see if it reads back correctly. If the return
  13384. * value is bad, force enable the PCIX workaround.
  13385. */
  13386. sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
  13387. writel(0x00000000, sram_base);
  13388. writel(0x00000000, sram_base + 4);
  13389. writel(0xffffffff, sram_base + 4);
  13390. if (readl(sram_base) != 0x00000000)
  13391. tg3_flag_set(tp, PCIX_TARGET_HWBUG);
  13392. }
  13393. }
  13394. udelay(50);
  13395. tg3_nvram_init(tp);
  13396. /* If the device has an NVRAM, no need to load patch firmware */
  13397. if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
  13398. !tg3_flag(tp, NO_NVRAM))
  13399. tp->fw_needed = NULL;
  13400. grc_misc_cfg = tr32(GRC_MISC_CFG);
  13401. grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
  13402. if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
  13403. (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
  13404. grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
  13405. tg3_flag_set(tp, IS_5788);
  13406. if (!tg3_flag(tp, IS_5788) &&
  13407. tg3_asic_rev(tp) != ASIC_REV_5700)
  13408. tg3_flag_set(tp, TAGGED_STATUS);
  13409. if (tg3_flag(tp, TAGGED_STATUS)) {
  13410. tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
  13411. HOSTCC_MODE_CLRTICK_TXBD);
  13412. tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
  13413. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  13414. tp->misc_host_ctrl);
  13415. }
  13416. /* Preserve the APE MAC_MODE bits */
  13417. if (tg3_flag(tp, ENABLE_APE))
  13418. tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
  13419. else
  13420. tp->mac_mode = 0;
  13421. if (tg3_10_100_only_device(tp, ent))
  13422. tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
  13423. err = tg3_phy_probe(tp);
  13424. if (err) {
  13425. dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
  13426. /* ... but do not return immediately ... */
  13427. tg3_mdio_fini(tp);
  13428. }
  13429. tg3_read_vpd(tp);
  13430. tg3_read_fw_ver(tp);
  13431. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
  13432. tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
  13433. } else {
  13434. if (tg3_asic_rev(tp) == ASIC_REV_5700)
  13435. tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
  13436. else
  13437. tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
  13438. }
  13439. /* 5700 {AX,BX} chips have a broken status block link
  13440. * change bit implementation, so we must use the
  13441. * status register in those cases.
  13442. */
  13443. if (tg3_asic_rev(tp) == ASIC_REV_5700)
  13444. tg3_flag_set(tp, USE_LINKCHG_REG);
  13445. else
  13446. tg3_flag_clear(tp, USE_LINKCHG_REG);
  13447. /* The led_ctrl is set during tg3_phy_probe, here we might
  13448. * have to force the link status polling mechanism based
  13449. * upon subsystem IDs.
  13450. */
  13451. if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
  13452. tg3_asic_rev(tp) == ASIC_REV_5701 &&
  13453. !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
  13454. tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
  13455. tg3_flag_set(tp, USE_LINKCHG_REG);
  13456. }
  13457. /* For all SERDES we poll the MAC status register. */
  13458. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
  13459. tg3_flag_set(tp, POLL_SERDES);
  13460. else
  13461. tg3_flag_clear(tp, POLL_SERDES);
  13462. tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
  13463. tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
  13464. if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
  13465. tg3_flag(tp, PCIX_MODE)) {
  13466. tp->rx_offset = NET_SKB_PAD;
  13467. #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  13468. tp->rx_copy_thresh = ~(u16)0;
  13469. #endif
  13470. }
  13471. tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
  13472. tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
  13473. tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
  13474. tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
  13475. /* Increment the rx prod index on the rx std ring by at most
  13476. * 8 for these chips to workaround hw errata.
  13477. */
  13478. if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
  13479. tg3_asic_rev(tp) == ASIC_REV_5752 ||
  13480. tg3_asic_rev(tp) == ASIC_REV_5755)
  13481. tp->rx_std_max_post = 8;
  13482. if (tg3_flag(tp, ASPM_WORKAROUND))
  13483. tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
  13484. PCIE_PWR_MGMT_L1_THRESH_MSK;
  13485. return err;
  13486. }
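/* SPARC helpers: read the MAC address from the OpenFirmware
 * "local-mac-address" property, or from the system IDPROM as a
 * last resort.
 */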
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
  13509. static int tg3_get_device_address(struct tg3 *tp)
  13510. {
  13511. struct net_device *dev = tp->dev;
  13512. u32 hi, lo, mac_offset;
  13513. int addr_ok = 0;
  13514. int err;
  13515. #ifdef CONFIG_SPARC
  13516. if (!tg3_get_macaddr_sparc(tp))
  13517. return 0;
  13518. #endif
  13519. if (tg3_flag(tp, IS_SSB_CORE)) {
  13520. err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
  13521. if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
  13522. return 0;
  13523. }
  13524. mac_offset = 0x7c;
  13525. if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
  13526. tg3_flag(tp, 5780_CLASS)) {
  13527. if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
  13528. mac_offset = 0xcc;
  13529. if (tg3_nvram_lock(tp))
  13530. tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
  13531. else
  13532. tg3_nvram_unlock(tp);
  13533. } else if (tg3_flag(tp, 5717_PLUS)) {
  13534. if (tp->pci_fn & 1)
  13535. mac_offset = 0xcc;
  13536. if (tp->pci_fn > 1)
  13537. mac_offset += 0x18c;
  13538. } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
  13539. mac_offset = 0x10;
  13540. /* First try to get it from MAC address mailbox. */
  13541. tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
  13542. if ((hi >> 16) == 0x484b) {
  13543. dev->dev_addr[0] = (hi >> 8) & 0xff;
  13544. dev->dev_addr[1] = (hi >> 0) & 0xff;
  13545. tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
  13546. dev->dev_addr[2] = (lo >> 24) & 0xff;
  13547. dev->dev_addr[3] = (lo >> 16) & 0xff;
  13548. dev->dev_addr[4] = (lo >> 8) & 0xff;
  13549. dev->dev_addr[5] = (lo >> 0) & 0xff;
  13550. /* Some old bootcode may report a 0 MAC address in SRAM */
  13551. addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
  13552. }
  13553. if (!addr_ok) {
  13554. /* Next, try NVRAM. */
  13555. if (!tg3_flag(tp, NO_NVRAM) &&
  13556. !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
  13557. !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
  13558. memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
  13559. memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
  13560. }
  13561. /* Finally just fetch it out of the MAC control regs. */
  13562. else {
  13563. hi = tr32(MAC_ADDR_0_HIGH);
  13564. lo = tr32(MAC_ADDR_0_LOW);
  13565. dev->dev_addr[5] = lo & 0xff;
  13566. dev->dev_addr[4] = (lo >> 8) & 0xff;
  13567. dev->dev_addr[3] = (lo >> 16) & 0xff;
  13568. dev->dev_addr[2] = (lo >> 24) & 0xff;
  13569. dev->dev_addr[1] = hi & 0xff;
  13570. dev->dev_addr[0] = (hi >> 8) & 0xff;
  13571. }
  13572. }
  13573. if (!is_valid_ether_addr(&dev->dev_addr[0])) {
  13574. #ifdef CONFIG_SPARC
  13575. if (!tg3_get_default_macaddr_sparc(tp))
  13576. return 0;
  13577. #endif
  13578. return -EINVAL;
  13579. }
  13580. return 0;
  13581. }
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2
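/* Pick the DMA read/write boundary bits for DMA_RWCTRL based on the
 * host cache line size and the bus type (PCI, PCI-X, or PCI Express).
 */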
  13584. static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
  13585. {
  13586. int cacheline_size;
  13587. u8 byte;
  13588. int goal;
  13589. pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
  13590. if (byte == 0)
  13591. cacheline_size = 1024;
  13592. else
  13593. cacheline_size = (int) byte * 4;
  13594. /* On 5703 and later chips, the boundary bits have no
  13595. * effect.
  13596. */
  13597. if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
  13598. tg3_asic_rev(tp) != ASIC_REV_5701 &&
  13599. !tg3_flag(tp, PCI_EXPRESS))
  13600. goto out;
  13601. #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
  13602. goal = BOUNDARY_MULTI_CACHELINE;
  13603. #else
  13604. #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
  13605. goal = BOUNDARY_SINGLE_CACHELINE;
  13606. #else
  13607. goal = 0;
  13608. #endif
  13609. #endif
  13610. if (tg3_flag(tp, 57765_PLUS)) {
  13611. val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
  13612. goto out;
  13613. }
  13614. if (!goal)
  13615. goto out;
  13616. /* PCI controllers on most RISC systems tend to disconnect
  13617. * when a device tries to burst across a cache-line boundary.
  13618. * Therefore, letting tg3 do so just wastes PCI bandwidth.
  13619. *
  13620. * Unfortunately, for PCI-E there are only limited
  13621. * write-side controls for this, and thus for reads
  13622. * we will still get the disconnects. We'll also waste
  13623. * these PCI cycles for both read and write for chips
  13624. * other than 5700 and 5701 which do not implement the
  13625. * boundary bits.
  13626. */
  13627. if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
  13628. switch (cacheline_size) {
  13629. case 16:
  13630. case 32:
  13631. case 64:
  13632. case 128:
  13633. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  13634. val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
  13635. DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
  13636. } else {
  13637. val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
  13638. DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
  13639. }
  13640. break;
  13641. case 256:
  13642. val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
  13643. DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
  13644. break;
  13645. default:
  13646. val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
  13647. DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
  13648. break;
  13649. }
  13650. } else if (tg3_flag(tp, PCI_EXPRESS)) {
  13651. switch (cacheline_size) {
  13652. case 16:
  13653. case 32:
  13654. case 64:
  13655. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  13656. val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
  13657. val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
  13658. break;
  13659. }
  13660. /* fallthrough */
  13661. case 128:
  13662. default:
  13663. val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
  13664. val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
  13665. break;
  13666. }
  13667. } else {
  13668. switch (cacheline_size) {
  13669. case 16:
  13670. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  13671. val |= (DMA_RWCTRL_READ_BNDRY_16 |
  13672. DMA_RWCTRL_WRITE_BNDRY_16);
  13673. break;
  13674. }
  13675. /* fallthrough */
  13676. case 32:
  13677. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  13678. val |= (DMA_RWCTRL_READ_BNDRY_32 |
  13679. DMA_RWCTRL_WRITE_BNDRY_32);
  13680. break;
  13681. }
  13682. /* fallthrough */
  13683. case 64:
  13684. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  13685. val |= (DMA_RWCTRL_READ_BNDRY_64 |
  13686. DMA_RWCTRL_WRITE_BNDRY_64);
  13687. break;
  13688. }
  13689. /* fallthrough */
  13690. case 128:
  13691. if (goal == BOUNDARY_SINGLE_CACHELINE) {
  13692. val |= (DMA_RWCTRL_READ_BNDRY_128 |
  13693. DMA_RWCTRL_WRITE_BNDRY_128);
  13694. break;
  13695. }
  13696. /* fallthrough */
  13697. case 256:
  13698. val |= (DMA_RWCTRL_READ_BNDRY_256 |
  13699. DMA_RWCTRL_WRITE_BNDRY_256);
  13700. break;
  13701. case 512:
  13702. val |= (DMA_RWCTRL_READ_BNDRY_512 |
  13703. DMA_RWCTRL_WRITE_BNDRY_512);
  13704. break;
  13705. case 1024:
  13706. default:
  13707. val |= (DMA_RWCTRL_READ_BNDRY_1024 |
  13708. DMA_RWCTRL_WRITE_BNDRY_1024);
  13709. break;
  13710. }
  13711. }
  13712. out:
  13713. return val;
  13714. }
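/* Run one DMA transaction through the chip: build an internal buffer
 * descriptor in NIC SRAM, kick the read or write DMA engine, and poll
 * the completion FIFO until the descriptor comes back.
 */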
  13715. static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
  13716. int size, int to_device)
  13717. {
  13718. struct tg3_internal_buffer_desc test_desc;
  13719. u32 sram_dma_descs;
  13720. int i, ret;
  13721. sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
  13722. tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
  13723. tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
  13724. tw32(RDMAC_STATUS, 0);
  13725. tw32(WDMAC_STATUS, 0);
  13726. tw32(BUFMGR_MODE, 0);
  13727. tw32(FTQ_RESET, 0);
  13728. test_desc.addr_hi = ((u64) buf_dma) >> 32;
  13729. test_desc.addr_lo = buf_dma & 0xffffffff;
  13730. test_desc.nic_mbuf = 0x00002100;
  13731. test_desc.len = size;
	/*
	 * HP ZX1 systems saw test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was loaded after an initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
  13744. if (to_device) {
  13745. test_desc.cqid_sqid = (13 << 8) | 2;
  13746. tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
  13747. udelay(40);
  13748. } else {
  13749. test_desc.cqid_sqid = (16 << 8) | 7;
  13750. tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
  13751. udelay(40);
  13752. }
  13753. test_desc.flags = 0x00000005;
  13754. for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
  13755. u32 val;
  13756. val = *(((u32 *)&test_desc) + i);
  13757. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
  13758. sram_dma_descs + (i * sizeof(u32)));
  13759. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  13760. }
  13761. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
  13762. if (to_device)
  13763. tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
  13764. else
  13765. tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
  13766. ret = -ENODEV;
  13767. for (i = 0; i < 40; i++) {
  13768. u32 val;
  13769. if (to_device)
  13770. val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
  13771. else
  13772. val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
  13773. if ((val & 0xffff) == sram_dma_descs) {
  13774. ret = 0;
  13775. break;
  13776. }
  13777. udelay(100);
  13778. }
  13779. return ret;
  13780. }
#define TEST_BUFFER_SIZE	0x2000
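/* Host bridges known to need the conservative 16-byte DMA write
 * boundary even when the DMA test below passes.
 */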
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
  13786. static int tg3_test_dma(struct tg3 *tp)
  13787. {
  13788. dma_addr_t buf_dma;
  13789. u32 *buf, saved_dma_rwctrl;
  13790. int ret = 0;
  13791. buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
  13792. &buf_dma, GFP_KERNEL);
  13793. if (!buf) {
  13794. ret = -ENOMEM;
  13795. goto out_nofree;
  13796. }
  13797. tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
  13798. (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
  13799. tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
  13800. if (tg3_flag(tp, 57765_PLUS))
  13801. goto out;
  13802. if (tg3_flag(tp, PCI_EXPRESS)) {
  13803. /* DMA read watermark not used on PCIE */
  13804. tp->dma_rwctrl |= 0x00180000;
  13805. } else if (!tg3_flag(tp, PCIX_MODE)) {
  13806. if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
  13807. tg3_asic_rev(tp) == ASIC_REV_5750)
  13808. tp->dma_rwctrl |= 0x003f0000;
  13809. else
  13810. tp->dma_rwctrl |= 0x003f000f;
  13811. } else {
  13812. if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
  13813. tg3_asic_rev(tp) == ASIC_REV_5704) {
  13814. u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
  13815. u32 read_water = 0x7;
  13816. /* If the 5704 is behind the EPB bridge, we can
  13817. * do the less restrictive ONE_DMA workaround for
  13818. * better performance.
  13819. */
  13820. if (tg3_flag(tp, 40BIT_DMA_BUG) &&
  13821. tg3_asic_rev(tp) == ASIC_REV_5704)
  13822. tp->dma_rwctrl |= 0x8000;
  13823. else if (ccval == 0x6 || ccval == 0x7)
  13824. tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
  13825. if (tg3_asic_rev(tp) == ASIC_REV_5703)
  13826. read_water = 4;
  13827. /* Set bit 23 to enable PCIX hw bug fix */
  13828. tp->dma_rwctrl |=
  13829. (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
  13830. (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
  13831. (1 << 23);
  13832. } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
  13833. /* 5780 always in PCIX mode */
  13834. tp->dma_rwctrl |= 0x00144000;
  13835. } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
  13836. /* 5714 always in PCIX mode */
  13837. tp->dma_rwctrl |= 0x00148000;
  13838. } else {
  13839. tp->dma_rwctrl |= 0x001b000f;
  13840. }
  13841. }
  13842. if (tg3_flag(tp, ONE_DMA_AT_ONCE))
  13843. tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
  13844. if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
  13845. tg3_asic_rev(tp) == ASIC_REV_5704)
  13846. tp->dma_rwctrl &= 0xfffffff0;
  13847. if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
  13848. tg3_asic_rev(tp) == ASIC_REV_5701) {
  13849. /* Remove this if it causes problems for some boards. */
  13850. tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
  13851. /* On 5700/5701 chips, we need to set this bit.
  13852. * Otherwise the chip will issue cacheline transactions
  13853. * to streamable DMA memory with not all the byte
  13854. * enables turned on. This is an error on several
  13855. * RISC PCI controllers, in particular sparc64.
  13856. *
  13857. * On 5703/5704 chips, this bit has been reassigned
  13858. * a different meaning. In particular, it is used
  13859. * on those chips to enable a PCI-X workaround.
  13860. */
  13861. tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
  13862. }
  13863. tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
  13864. #if 0
  13865. /* Unneeded, already done by tg3_get_invariants. */
  13866. tg3_switch_clocks(tp);
  13867. #endif
  13868. if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
  13869. tg3_asic_rev(tp) != ASIC_REV_5701)
  13870. goto out;
  13871. /* It is best to perform DMA test with maximum write burst size
  13872. * to expose the 5700/5701 write DMA bug.
  13873. */
  13874. saved_dma_rwctrl = tp->dma_rwctrl;
  13875. tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
  13876. tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
  13877. while (1) {
  13878. u32 *p = buf, i;
  13879. for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
  13880. p[i] = i;
  13881. /* Send the buffer to the chip. */
  13882. ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
  13883. if (ret) {
  13884. dev_err(&tp->pdev->dev,
  13885. "%s: Buffer write failed. err = %d\n",
  13886. __func__, ret);
  13887. break;
  13888. }
  13889. #if 0
  13890. /* validate data reached card RAM correctly. */
  13891. for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
  13892. u32 val;
  13893. tg3_read_mem(tp, 0x2100 + (i*4), &val);
  13894. if (le32_to_cpu(val) != p[i]) {
  13895. dev_err(&tp->pdev->dev,
  13896. "%s: Buffer corrupted on device! "
  13897. "(%d != %d)\n", __func__, val, i);
  13898. /* ret = -ENODEV here? */
  13899. }
  13900. p[i] = 0;
  13901. }
  13902. #endif
  13903. /* Now read it back. */
  13904. ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
  13905. if (ret) {
  13906. dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
  13907. "err = %d\n", __func__, ret);
  13908. break;
  13909. }
  13910. /* Verify it. */
  13911. for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
  13912. if (p[i] == i)
  13913. continue;
  13914. if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
  13915. DMA_RWCTRL_WRITE_BNDRY_16) {
  13916. tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
  13917. tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
  13918. tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
  13919. break;
  13920. } else {
  13921. dev_err(&tp->pdev->dev,
  13922. "%s: Buffer corrupted on read back! "
  13923. "(%d != %d)\n", __func__, p[i], i);
  13924. ret = -ENODEV;
  13925. goto out;
  13926. }
  13927. }
  13928. if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
  13929. /* Success. */
  13930. ret = 0;
  13931. break;
  13932. }
  13933. }
  13934. if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
  13935. DMA_RWCTRL_WRITE_BNDRY_16) {
  13936. /* DMA test passed without adjusting DMA boundary,
  13937. * now look for chipsets that are known to expose the
  13938. * DMA bug without failing the test.
  13939. */
  13940. if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
  13941. tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
  13942. tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
  13943. } else {
  13944. /* Safe to use the calculated DMA boundary. */
  13945. tp->dma_rwctrl = saved_dma_rwctrl;
  13946. }
  13947. tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
  13948. }
  13949. out:
  13950. dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
  13951. out_nofree:
  13952. return ret;
  13953. }
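/* Program the default mbuf and DMA watermarks; the values differ per
 * ASIC generation and for standard versus jumbo frames.
 */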
  13954. static void tg3_init_bufmgr_config(struct tg3 *tp)
  13955. {
  13956. if (tg3_flag(tp, 57765_PLUS)) {
  13957. tp->bufmgr_config.mbuf_read_dma_low_water =
  13958. DEFAULT_MB_RDMA_LOW_WATER_5705;
  13959. tp->bufmgr_config.mbuf_mac_rx_low_water =
  13960. DEFAULT_MB_MACRX_LOW_WATER_57765;
  13961. tp->bufmgr_config.mbuf_high_water =
  13962. DEFAULT_MB_HIGH_WATER_57765;
  13963. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  13964. DEFAULT_MB_RDMA_LOW_WATER_5705;
  13965. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  13966. DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
  13967. tp->bufmgr_config.mbuf_high_water_jumbo =
  13968. DEFAULT_MB_HIGH_WATER_JUMBO_57765;
  13969. } else if (tg3_flag(tp, 5705_PLUS)) {
  13970. tp->bufmgr_config.mbuf_read_dma_low_water =
  13971. DEFAULT_MB_RDMA_LOW_WATER_5705;
  13972. tp->bufmgr_config.mbuf_mac_rx_low_water =
  13973. DEFAULT_MB_MACRX_LOW_WATER_5705;
  13974. tp->bufmgr_config.mbuf_high_water =
  13975. DEFAULT_MB_HIGH_WATER_5705;
  13976. if (tg3_asic_rev(tp) == ASIC_REV_5906) {
  13977. tp->bufmgr_config.mbuf_mac_rx_low_water =
  13978. DEFAULT_MB_MACRX_LOW_WATER_5906;
  13979. tp->bufmgr_config.mbuf_high_water =
  13980. DEFAULT_MB_HIGH_WATER_5906;
  13981. }
  13982. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  13983. DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
  13984. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  13985. DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
  13986. tp->bufmgr_config.mbuf_high_water_jumbo =
  13987. DEFAULT_MB_HIGH_WATER_JUMBO_5780;
  13988. } else {
  13989. tp->bufmgr_config.mbuf_read_dma_low_water =
  13990. DEFAULT_MB_RDMA_LOW_WATER;
  13991. tp->bufmgr_config.mbuf_mac_rx_low_water =
  13992. DEFAULT_MB_MACRX_LOW_WATER;
  13993. tp->bufmgr_config.mbuf_high_water =
  13994. DEFAULT_MB_HIGH_WATER;
  13995. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  13996. DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
  13997. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  13998. DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
  13999. tp->bufmgr_config.mbuf_high_water_jumbo =
  14000. DEFAULT_MB_HIGH_WATER_JUMBO;
  14001. }
  14002. tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
  14003. tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
  14004. }
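/* Map the PHY ID to a printable name for the probe banner. */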
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400: return "5400";
	case TG3_PHY_ID_BCM5401: return "5401";
	case TG3_PHY_ID_BCM5411: return "5411";
	case TG3_PHY_ID_BCM5701: return "5701";
	case TG3_PHY_ID_BCM5703: return "5703";
	case TG3_PHY_ID_BCM5704: return "5704";
	case TG3_PHY_ID_BCM5705: return "5705";
	case TG3_PHY_ID_BCM5750: return "5750";
	case TG3_PHY_ID_BCM5752: return "5752";
	case TG3_PHY_ID_BCM5714: return "5714";
	case TG3_PHY_ID_BCM5780: return "5780";
	case TG3_PHY_ID_BCM5755: return "5755";
	case TG3_PHY_ID_BCM5787: return "5787";
	case TG3_PHY_ID_BCM5784: return "5784";
	case TG3_PHY_ID_BCM5756: return "5722/5756";
	case TG3_PHY_ID_BCM5906: return "5906";
	case TG3_PHY_ID_BCM5761: return "5761";
	case TG3_PHY_ID_BCM5718C: return "5718C";
	case TG3_PHY_ID_BCM5718S: return "5718S";
	case TG3_PHY_ID_BCM57765: return "57765";
	case TG3_PHY_ID_BCM5719C: return "5719C";
	case TG3_PHY_ID_BCM5720C: return "5720C";
	case TG3_PHY_ID_BCM5762: return "5762C";
	case TG3_PHY_ID_BCM8002: return "8002/serdes";
	case 0: return "serdes";
	default: return "unknown";
	}
}
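/* Format a bus description (type, clock, width) into the caller-supplied
 * buffer for the probe banner.
 */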
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
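/* Seed the default ethtool coalescing parameters; the IRQ and statistics
 * block coalescing values are zeroed for 5705 and later parts.
 */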
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
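/* PCI probe entry point: enable and map the device, read the chip
 * invariants, size the DMA masks, run the DMA test, and register the
 * net device.
 */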
  14096. static int tg3_init_one(struct pci_dev *pdev,
  14097. const struct pci_device_id *ent)
  14098. {
  14099. struct net_device *dev;
  14100. struct tg3 *tp;
  14101. int i, err, pm_cap;
  14102. u32 sndmbx, rcvmbx, intmbx;
  14103. char str[40];
  14104. u64 dma_mask, persist_dma_mask;
  14105. netdev_features_t features = 0;
  14106. printk_once(KERN_INFO "%s\n", version);
  14107. err = pci_enable_device(pdev);
  14108. if (err) {
  14109. dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
  14110. return err;
  14111. }
  14112. err = pci_request_regions(pdev, DRV_MODULE_NAME);
  14113. if (err) {
  14114. dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
  14115. goto err_out_disable_pdev;
  14116. }
  14117. pci_set_master(pdev);
  14118. /* Find power-management capability. */
  14119. pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
  14120. if (pm_cap == 0) {
  14121. dev_err(&pdev->dev,
  14122. "Cannot find Power Management capability, aborting\n");
  14123. err = -EIO;
  14124. goto err_out_free_res;
  14125. }
  14126. err = pci_set_power_state(pdev, PCI_D0);
  14127. if (err) {
  14128. dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
  14129. goto err_out_free_res;
  14130. }
  14131. dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
  14132. if (!dev) {
  14133. err = -ENOMEM;
  14134. goto err_out_power_down;
  14135. }
  14136. SET_NETDEV_DEV(dev, &pdev->dev);
  14137. tp = netdev_priv(dev);
  14138. tp->pdev = pdev;
  14139. tp->dev = dev;
  14140. tp->pm_cap = pm_cap;
  14141. tp->rx_mode = TG3_DEF_RX_MODE;
  14142. tp->tx_mode = TG3_DEF_TX_MODE;
  14143. tp->irq_sync = 1;
  14144. if (tg3_debug > 0)
  14145. tp->msg_enable = tg3_debug;
  14146. else
  14147. tp->msg_enable = TG3_DEF_MSG_ENABLE;
  14148. if (pdev_is_ssb_gige_core(pdev)) {
  14149. tg3_flag_set(tp, IS_SSB_CORE);
  14150. if (ssb_gige_must_flush_posted_writes(pdev))
  14151. tg3_flag_set(tp, FLUSH_POSTED_WRITES);
  14152. if (ssb_gige_one_dma_at_once(pdev))
  14153. tg3_flag_set(tp, ONE_DMA_AT_ONCE);
  14154. if (ssb_gige_have_roboswitch(pdev))
  14155. tg3_flag_set(tp, ROBOSWITCH);
  14156. if (ssb_gige_is_rgmii(pdev))
  14157. tg3_flag_set(tp, RGMII_MODE);
  14158. }
  14159. /* The word/byte swap controls here control register access byte
  14160. * swapping. DMA data byte swapping is controlled in the GRC_MODE
  14161. * setting below.
  14162. */
  14163. tp->misc_host_ctrl =
  14164. MISC_HOST_CTRL_MASK_PCI_INT |
  14165. MISC_HOST_CTRL_WORD_SWAP |
  14166. MISC_HOST_CTRL_INDIR_ACCESS |
  14167. MISC_HOST_CTRL_PCISTATE_RW;
  14168. /* The NONFRM (non-frame) byte/word swap controls take effect
  14169. * on descriptor entries, anything which isn't packet data.
  14170. *
  14171. * The StrongARM chips on the board (one for tx, one for rx)
  14172. * are running in big-endian mode.
  14173. */
  14174. tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
  14175. GRC_MODE_WSWAP_NONFRM_DATA);
  14176. #ifdef __BIG_ENDIAN
  14177. tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
  14178. #endif
  14179. spin_lock_init(&tp->lock);
  14180. spin_lock_init(&tp->indirect_lock);
  14181. INIT_WORK(&tp->reset_task, tg3_reset_task);
  14182. tp->regs = pci_ioremap_bar(pdev, BAR_0);
  14183. if (!tp->regs) {
  14184. dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
  14185. err = -ENOMEM;
  14186. goto err_out_free_dev;
  14187. }
  14188. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
  14189. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
  14190. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
  14191. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
  14192. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
  14193. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
  14194. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
  14195. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
  14196. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
  14197. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
  14198. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
  14199. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
  14200. tg3_flag_set(tp, ENABLE_APE);
  14201. tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
  14202. if (!tp->aperegs) {
  14203. dev_err(&pdev->dev,
  14204. "Cannot map APE registers, aborting\n");
  14205. err = -ENOMEM;
  14206. goto err_out_iounmap;
  14207. }
  14208. }
  14209. tp->rx_pending = TG3_DEF_RX_RING_PENDING;
  14210. tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
  14211. dev->ethtool_ops = &tg3_ethtool_ops;
  14212. dev->watchdog_timeo = TG3_TX_TIMEOUT;
  14213. dev->netdev_ops = &tg3_netdev_ops;
  14214. dev->irq = pdev->irq;
  14215. err = tg3_get_invariants(tp, ent);
  14216. if (err) {
  14217. dev_err(&pdev->dev,
  14218. "Problem fetching invariants of chip, aborting\n");
  14219. goto err_out_apeunmap;
  14220. }
  14221. /* The EPB bridge inside 5714, 5715, and 5780 and any
  14222. * device behind the EPB cannot support DMA addresses > 40-bit.
  14223. * On 64-bit systems with IOMMU, use 40-bit dma_mask.
  14224. * On 64-bit systems without IOMMU, use 64-bit dma_mask and
  14225. * do DMA address check in tg3_start_xmit().
  14226. */
  14227. if (tg3_flag(tp, IS_5788))
  14228. persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
  14229. else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
  14230. persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
  14231. #ifdef CONFIG_HIGHMEM
  14232. dma_mask = DMA_BIT_MASK(64);
  14233. #endif
  14234. } else
  14235. persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
  14236. /* Configure DMA attributes. */
  14237. if (dma_mask > DMA_BIT_MASK(32)) {
  14238. err = pci_set_dma_mask(pdev, dma_mask);
  14239. if (!err) {
  14240. features |= NETIF_F_HIGHDMA;
  14241. err = pci_set_consistent_dma_mask(pdev,
  14242. persist_dma_mask);
  14243. if (err < 0) {
  14244. dev_err(&pdev->dev, "Unable to obtain 64 bit "
  14245. "DMA for consistent allocations\n");
  14246. goto err_out_apeunmap;
  14247. }
  14248. }
  14249. }
  14250. if (err || dma_mask == DMA_BIT_MASK(32)) {
  14251. err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  14252. if (err) {
  14253. dev_err(&pdev->dev,
  14254. "No usable DMA configuration, aborting\n");
  14255. goto err_out_apeunmap;
  14256. }
  14257. }
  14258. tg3_init_bufmgr_config(tp);
  14259. features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  14260. /* 5700 B0 chips do not support checksumming correctly due
  14261. * to hardware bugs.
  14262. */
  14263. if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
  14264. features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
  14265. if (tg3_flag(tp, 5755_PLUS))
  14266. features |= NETIF_F_IPV6_CSUM;
  14267. }
  14268. /* TSO is on by default on chips that support hardware TSO.
  14269. * Firmware TSO on older chips gives lower performance, so it
  14270. * is off by default, but can be enabled using ethtool.
  14271. */
  14272. if ((tg3_flag(tp, HW_TSO_1) ||
  14273. tg3_flag(tp, HW_TSO_2) ||
  14274. tg3_flag(tp, HW_TSO_3)) &&
  14275. (features & NETIF_F_IP_CSUM))
  14276. features |= NETIF_F_TSO;
  14277. if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
  14278. if (features & NETIF_F_IPV6_CSUM)
  14279. features |= NETIF_F_TSO6;
  14280. if (tg3_flag(tp, HW_TSO_3) ||
  14281. tg3_asic_rev(tp) == ASIC_REV_5761 ||
  14282. (tg3_asic_rev(tp) == ASIC_REV_5784 &&
  14283. tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
  14284. tg3_asic_rev(tp) == ASIC_REV_5785 ||
  14285. tg3_asic_rev(tp) == ASIC_REV_57780)
  14286. features |= NETIF_F_TSO_ECN;
  14287. }
  14288. dev->features |= features;
  14289. dev->vlan_features |= features;
	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
  14295. if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
  14296. !tg3_flag(tp, CPMU_PRESENT))
  14297. /* Add the loopback capability */
  14298. features |= NETIF_F_LOOPBACK;
  14299. dev->hw_features |= features;
  14300. if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
  14301. !tg3_flag(tp, TSO_CAPABLE) &&
  14302. !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
  14303. tg3_flag_set(tp, MAX_RXPEND_64);
  14304. tp->rx_pending = 63;
  14305. }
  14306. err = tg3_get_device_address(tp);
  14307. if (err) {
  14308. dev_err(&pdev->dev,
  14309. "Could not obtain valid ethernet address, aborting\n");
  14310. goto err_out_apeunmap;
  14311. }
	/*
	 * Reset the chip in case the UNDI or EFI boot driver did not shut it
	 * down cleanly. Otherwise the DMA self test will enable WDMAC and
	 * we'll see (spurious) pending DMA on the PCI bus at that point.
	 */
  14317. if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
  14318. (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
  14319. tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
  14320. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  14321. }
  14322. err = tg3_test_dma(tp);
  14323. if (err) {
  14324. dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
  14325. goto err_out_apeunmap;
  14326. }
  14327. intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
  14328. rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
  14329. sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
  14330. for (i = 0; i < tp->irq_max; i++) {
  14331. struct tg3_napi *tnapi = &tp->napi[i];
  14332. tnapi->tp = tp;
  14333. tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
  14334. tnapi->int_mbox = intmbx;
  14335. if (i <= 4)
  14336. intmbx += 0x8;
  14337. else
  14338. intmbx += 0x4;
  14339. tnapi->consmbox = rcvmbx;
  14340. tnapi->prodmbox = sndmbx;
  14341. if (i)
  14342. tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
  14343. else
  14344. tnapi->coal_now = HOSTCC_MODE_NOW;
  14345. if (!tg3_flag(tp, SUPPORT_MSIX))
  14346. break;
		/*
		 * If we support MSIX, we'll be using RSS. If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts. Reuse the
		 * mailbox values for the next iteration. The values we set up
		 * above are still useful for the single vectored mode.
		 */
  14354. if (!i)
  14355. continue;
  14356. rcvmbx += 0x8;
  14357. if (sndmbx & 0x4)
  14358. sndmbx -= 0x4;
  14359. else
  14360. sndmbx += 0xc;
  14361. }
  14362. tg3_init_coal(tp);
  14363. pci_set_drvdata(pdev, dev);
  14364. if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
  14365. tg3_asic_rev(tp) == ASIC_REV_5720 ||
  14366. tg3_asic_rev(tp) == ASIC_REV_5762)
  14367. tg3_flag_set(tp, PTP_CAPABLE);
  14368. if (tg3_flag(tp, 5717_PLUS)) {
  14369. /* Resume a low-power mode */
  14370. tg3_frob_aux_power(tp, false);
  14371. }
  14372. tg3_timer_init(tp);
  14373. tg3_carrier_off(tp);
  14374. err = register_netdev(dev);
  14375. if (err) {
  14376. dev_err(&pdev->dev, "Cannot register net device, aborting\n");
  14377. goto err_out_apeunmap;
  14378. }
  14379. netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
  14380. tp->board_part_number,
  14381. tg3_chip_rev_id(tp),
  14382. tg3_bus_string(tp, str),
  14383. dev->dev_addr);
  14384. if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
  14385. struct phy_device *phydev;
  14386. phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
  14387. netdev_info(dev,
  14388. "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
  14389. phydev->drv->name, dev_name(&phydev->dev));
  14390. } else {
  14391. char *ethtype;
  14392. if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
  14393. ethtype = "10/100Base-TX";
  14394. else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
  14395. ethtype = "1000Base-SX";
  14396. else
  14397. ethtype = "10/100/1000Base-T";
  14398. netdev_info(dev, "attached PHY is %s (%s Ethernet) "
  14399. "(WireSpeed[%d], EEE[%d])\n",
  14400. tg3_phy_string(tp), ethtype,
  14401. (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
  14402. (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
  14403. }
  14404. netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
  14405. (dev->features & NETIF_F_RXCSUM) != 0,
  14406. tg3_flag(tp, USE_LINKCHG_REG) != 0,
  14407. (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
  14408. tg3_flag(tp, ENABLE_ASF) != 0,
  14409. tg3_flag(tp, TSO_CAPABLE) != 0);
  14410. netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
  14411. tp->dma_rwctrl,
  14412. pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
  14413. ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
  14414. pci_save_state(pdev);
  14415. return 0;
  14416. err_out_apeunmap:
  14417. if (tp->aperegs) {
  14418. iounmap(tp->aperegs);
  14419. tp->aperegs = NULL;
  14420. }
  14421. err_out_iounmap:
  14422. if (tp->regs) {
  14423. iounmap(tp->regs);
  14424. tp->regs = NULL;
  14425. }
  14426. err_out_free_dev:
  14427. free_netdev(dev);
  14428. err_out_power_down:
  14429. pci_set_power_state(pdev, PCI_D3hot);
  14430. err_out_free_res:
  14431. pci_release_regions(pdev);
  14432. err_out_disable_pdev:
  14433. pci_disable_device(pdev);
  14434. pci_set_drvdata(pdev, NULL);
  14435. return err;
  14436. }
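/* Undo tg3_init_one(): unregister the net device and release all PCI
 * resources and mappings.
 */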
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
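/* System sleep hooks: halt the chip and prepare it for power-down on
 * suspend; restart the hardware and the PHY on resume.
 */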
  14463. #ifdef CONFIG_PM_SLEEP
  14464. static int tg3_suspend(struct device *device)
  14465. {
  14466. struct pci_dev *pdev = to_pci_dev(device);
  14467. struct net_device *dev = pci_get_drvdata(pdev);
  14468. struct tg3 *tp = netdev_priv(dev);
  14469. int err;
  14470. if (!netif_running(dev))
  14471. return 0;
  14472. tg3_reset_task_cancel(tp);
  14473. tg3_phy_stop(tp);
  14474. tg3_netif_stop(tp);
  14475. tg3_timer_stop(tp);
  14476. tg3_full_lock(tp, 1);
  14477. tg3_disable_ints(tp);
  14478. tg3_full_unlock(tp);
  14479. netif_device_detach(dev);
  14480. tg3_full_lock(tp, 0);
  14481. tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
  14482. tg3_flag_clear(tp, INIT_COMPLETE);
  14483. tg3_full_unlock(tp);
  14484. err = tg3_power_down_prepare(tp);
  14485. if (err) {
  14486. int err2;
  14487. tg3_full_lock(tp, 0);
  14488. tg3_flag_set(tp, INIT_COMPLETE);
  14489. err2 = tg3_restart_hw(tp, 1);
  14490. if (err2)
  14491. goto out;
  14492. tg3_timer_start(tp);
  14493. netif_device_attach(dev);
  14494. tg3_netif_start(tp);
  14495. out:
  14496. tg3_full_unlock(tp);
  14497. if (!err2)
  14498. tg3_phy_start(tp);
  14499. }
  14500. return err;
  14501. }
  14502. static int tg3_resume(struct device *device)
  14503. {
  14504. struct pci_dev *pdev = to_pci_dev(device);
  14505. struct net_device *dev = pci_get_drvdata(pdev);
  14506. struct tg3 *tp = netdev_priv(dev);
  14507. int err;
  14508. if (!netif_running(dev))
  14509. return 0;
  14510. netif_device_attach(dev);
  14511. tg3_full_lock(tp, 0);
  14512. tg3_flag_set(tp, INIT_COMPLETE);
  14513. err = tg3_restart_hw(tp,
  14514. !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
  14515. if (err)
  14516. goto out;
  14517. tg3_timer_start(tp);
  14518. tg3_netif_start(tp);
  14519. out:
  14520. tg3_full_unlock(tp);
  14521. if (!err)
  14522. tg3_phy_start(tp);
  14523. return err;
  14524. }
  14525. static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
  14526. #define TG3_PM_OPS (&tg3_pm_ops)
  14527. #else
  14528. #define TG3_PM_OPS NULL
  14529. #endif /* CONFIG_PM_SLEEP */
  14530. /**
  14531. * tg3_io_error_detected - called when PCI error is detected
  14532. * @pdev: Pointer to PCI device
  14533. * @state: The current pci connection state
  14534. *
  14535. * This function is called after a PCI bus error affecting
  14536. * this device has been detected.
  14537. */
  14538. static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
  14539. pci_channel_state_t state)
  14540. {
  14541. struct net_device *netdev = pci_get_drvdata(pdev);
  14542. struct tg3 *tp = netdev_priv(netdev);
  14543. pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
  14544. netdev_info(netdev, "PCI I/O error detected\n");
  14545. rtnl_lock();
  14546. if (!netif_running(netdev))
  14547. goto done;
  14548. tg3_phy_stop(tp);
  14549. tg3_netif_stop(tp);
  14550. tg3_timer_stop(tp);
  14551. /* Want to make sure that the reset task doesn't run */
  14552. tg3_reset_task_cancel(tp);
  14553. netif_device_detach(netdev);
  14554. /* Clean up software state, even if MMIO is blocked */
  14555. tg3_full_lock(tp, 0);
  14556. tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
  14557. tg3_full_unlock(tp);
  14558. done:
  14559. if (state == pci_channel_io_perm_failure)
  14560. err = PCI_ERS_RESULT_DISCONNECT;
  14561. else
  14562. pci_disable_device(pdev);
  14563. rtnl_unlock();
  14564. return err;
  14565. }
  14566. /**
  14567. * tg3_io_slot_reset - called after the pci bus has been reset.
  14568. * @pdev: Pointer to PCI device
  14569. *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
  14574. */
  14575. static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
  14576. {
  14577. struct net_device *netdev = pci_get_drvdata(pdev);
  14578. struct tg3 *tp = netdev_priv(netdev);
  14579. pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
  14580. int err;
  14581. rtnl_lock();
  14582. if (pci_enable_device(pdev)) {
  14583. netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
  14584. goto done;
  14585. }
  14586. pci_set_master(pdev);
  14587. pci_restore_state(pdev);
  14588. pci_save_state(pdev);
  14589. if (!netif_running(netdev)) {
  14590. rc = PCI_ERS_RESULT_RECOVERED;
  14591. goto done;
  14592. }
  14593. err = tg3_power_up(tp);
  14594. if (err)
  14595. goto done;
  14596. rc = PCI_ERS_RESULT_RECOVERED;
  14597. done:
  14598. rtnl_unlock();
  14599. return rc;
  14600. }
  14601. /**
  14602. * tg3_io_resume - called when traffic can start flowing again.
  14603. * @pdev: Pointer to PCI device
  14604. *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
  14607. */
  14608. static void tg3_io_resume(struct pci_dev *pdev)
  14609. {
  14610. struct net_device *netdev = pci_get_drvdata(pdev);
  14611. struct tg3 *tp = netdev_priv(netdev);
  14612. int err;
  14613. rtnl_lock();
  14614. if (!netif_running(netdev))
  14615. goto done;
  14616. tg3_full_lock(tp, 0);
  14617. tg3_flag_set(tp, INIT_COMPLETE);
  14618. err = tg3_restart_hw(tp, 1);
  14619. if (err) {
  14620. tg3_full_unlock(tp);
  14621. netdev_err(netdev, "Cannot restart hardware after reset.\n");
  14622. goto done;
  14623. }
  14624. netif_device_attach(netdev);
  14625. tg3_timer_start(tp);
  14626. tg3_netif_start(tp);
  14627. tg3_full_unlock(tp);
  14628. tg3_phy_start(tp);
  14629. done:
  14630. rtnl_unlock();
  14631. }
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);