bnx2.c

/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.16"
#define DRV_MODULE_RELDATE	"July 2, 2010"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j15.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

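/* Supported NVRAM/flash parts.  At init time the driver matches the
 * flash strapping read from the chip against the first field of each
 * entry to pick the access parameters for the attached part.
 */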
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

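/* Return the number of free TX descriptors in the ring.  tx_prod and
 * tx_cons are free-running 16-bit indices, so the difference must be
 * masked, and the one unusable slot accounted for (see comment below).
 * The smp_mb() forces a fresh read of the indices updated by other CPUs.
 */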
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

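/* Indirect register access: the target register is reached through the
 * PCICFG register window, so the address and data phases must be
 * serialized with indirect_lock.
 */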
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

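/* Write one word of context memory.  On the 5709 the write goes through
 * BNX2_CTX_CTX_DATA/CTRL and the WRITE_REQ bit is polled until the chip
 * has committed it; older chips take the address and data directly.
 */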
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

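/* Hand the cnic driver its interrupt information: with MSI-X it gets
 * its own vector and its own slice of the status block area, otherwise
 * it shares status block 0 with the net driver.
 */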
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

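/* Exported so the cnic module can look up this device's shared
 * cnic_eth_dev structure and register its callbacks.
 */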
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;
	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

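/* MDIO read through the EMAC.  Auto-polling is temporarily turned off
 * while the command is issued, then the START_BUSY bit is polled (up to
 * 50 x 10us) for completion before auto-polling is restored.
 */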
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

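/* Mask interrupts and wait for any handlers already running to finish.
 * intr_sem is bumped first so the interrupt/poll paths will not
 * re-enable interrupts until bnx2_netif_start() drops it back to zero.
 */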
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

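/* Quiesce the device for a reset or reconfiguration: optionally stop
 * the cnic driver, stop NAPI and the TX queues, mask interrupts, and
 * drop the carrier so the stack does not declare a TX timeout while
 * the device is down.  bnx2_netif_start() reverses all of this.
 */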
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

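/* Per-ring RX allocation: the software buffer rings can be large, so
 * they come from vmalloc(); the hardware descriptor rings are
 * allocated one DMA-coherent block per ring segment.
 */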
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

  695. static void
  696. bnx2_free_mem(struct bnx2 *bp)
  697. {
  698. int i;
  699. struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
  700. bnx2_free_tx_mem(bp);
  701. bnx2_free_rx_mem(bp);
  702. for (i = 0; i < bp->ctx_pages; i++) {
  703. if (bp->ctx_blk[i]) {
  704. dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
  705. bp->ctx_blk[i],
  706. bp->ctx_blk_mapping[i]);
  707. bp->ctx_blk[i] = NULL;
  708. }
  709. }
  710. if (bnapi->status_blk.msi) {
  711. dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
  712. bnapi->status_blk.msi,
  713. bp->status_blk_mapping);
  714. bnapi->status_blk.msi = NULL;
  715. bp->stats_blk = NULL;
  716. }
  717. }
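/*
 * Allocate all host memory for the device: a combined status + statistics
 * block (one aligned status block per vector when MSI-X is available),
 * context memory for the 5709, and the RX/TX rings.
 */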
  718. static int
  719. bnx2_alloc_mem(struct bnx2 *bp)
  720. {
  721. int i, status_blk_size, err;
  722. struct bnx2_napi *bnapi;
  723. void *status_blk;
  724. /* Combine status and statistics blocks into one allocation. */
  725. status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
  726. if (bp->flags & BNX2_FLAG_MSIX_CAP)
  727. status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
  728. BNX2_SBLK_MSIX_ALIGN_SIZE);
  729. bp->status_stats_size = status_blk_size +
  730. sizeof(struct statistics_block);
  731. status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
  732. &bp->status_blk_mapping, GFP_KERNEL);
  733. if (status_blk == NULL)
  734. goto alloc_mem_err;
  735. memset(status_blk, 0, bp->status_stats_size);
  736. bnapi = &bp->bnx2_napi[0];
  737. bnapi->status_blk.msi = status_blk;
  738. bnapi->hw_tx_cons_ptr =
  739. &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
  740. bnapi->hw_rx_cons_ptr =
  741. &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
  742. if (bp->flags & BNX2_FLAG_MSIX_CAP) {
  743. for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
  744. struct status_block_msix *sblk;
  745. bnapi = &bp->bnx2_napi[i];
  746. sblk = (void *) (status_blk +
  747. BNX2_SBLK_MSIX_ALIGN_SIZE * i);
  748. bnapi->status_blk.msix = sblk;
  749. bnapi->hw_tx_cons_ptr =
  750. &sblk->status_tx_quick_consumer_index;
  751. bnapi->hw_rx_cons_ptr =
  752. &sblk->status_rx_quick_consumer_index;
  753. bnapi->int_num = i << 24;
  754. }
  755. }
  756. bp->stats_blk = status_blk + status_blk_size;
  757. bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
  758. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  759. bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
  760. if (bp->ctx_pages == 0)
  761. bp->ctx_pages = 1;
  762. for (i = 0; i < bp->ctx_pages; i++) {
  763. bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
  764. BCM_PAGE_SIZE,
  765. &bp->ctx_blk_mapping[i],
  766. GFP_KERNEL);
  767. if (bp->ctx_blk[i] == NULL)
  768. goto alloc_mem_err;
  769. }
  770. }
  771. err = bnx2_alloc_rx_mem(bp);
  772. if (err)
  773. goto alloc_mem_err;
  774. err = bnx2_alloc_tx_mem(bp);
  775. if (err)
  776. goto alloc_mem_err;
  777. return 0;
  778. alloc_mem_err:
  779. bnx2_free_mem(bp);
  780. return -ENOMEM;
  781. }
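/* Report the current speed/duplex/autoneg state to the bootcode through
 * the BNX2_LINK_STATUS shared memory word.
 */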
  782. static void
  783. bnx2_report_fw_link(struct bnx2 *bp)
  784. {
  785. u32 fw_link_status = 0;
  786. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  787. return;
  788. if (bp->link_up) {
  789. u32 bmsr;
  790. switch (bp->line_speed) {
  791. case SPEED_10:
  792. if (bp->duplex == DUPLEX_HALF)
  793. fw_link_status = BNX2_LINK_STATUS_10HALF;
  794. else
  795. fw_link_status = BNX2_LINK_STATUS_10FULL;
  796. break;
  797. case SPEED_100:
  798. if (bp->duplex == DUPLEX_HALF)
  799. fw_link_status = BNX2_LINK_STATUS_100HALF;
  800. else
  801. fw_link_status = BNX2_LINK_STATUS_100FULL;
  802. break;
  803. case SPEED_1000:
  804. if (bp->duplex == DUPLEX_HALF)
  805. fw_link_status = BNX2_LINK_STATUS_1000HALF;
  806. else
  807. fw_link_status = BNX2_LINK_STATUS_1000FULL;
  808. break;
  809. case SPEED_2500:
  810. if (bp->duplex == DUPLEX_HALF)
  811. fw_link_status = BNX2_LINK_STATUS_2500HALF;
  812. else
  813. fw_link_status = BNX2_LINK_STATUS_2500FULL;
  814. break;
  815. }
  816. fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
  817. if (bp->autoneg) {
  818. fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
  819. bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
  820. bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
  821. if (!(bmsr & BMSR_ANEGCOMPLETE) ||
  822. bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
  823. fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
  824. else
  825. fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
  826. }
  827. }
  828. else
  829. fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
  830. bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
  831. }
  832. static char *
  833. bnx2_xceiver_str(struct bnx2 *bp)
  834. {
  835. return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
  836. ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
  837. "Copper"));
  838. }
  839. static void
  840. bnx2_report_link(struct bnx2 *bp)
  841. {
  842. if (bp->link_up) {
  843. netif_carrier_on(bp->dev);
  844. netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
  845. bnx2_xceiver_str(bp),
  846. bp->line_speed,
  847. bp->duplex == DUPLEX_FULL ? "full" : "half");
  848. if (bp->flow_ctrl) {
  849. if (bp->flow_ctrl & FLOW_CTRL_RX) {
  850. pr_cont(", receive ");
  851. if (bp->flow_ctrl & FLOW_CTRL_TX)
  852. pr_cont("& transmit ");
  853. }
  854. else {
  855. pr_cont(", transmit ");
  856. }
  857. pr_cont("flow control ON");
  858. }
  859. pr_cont("\n");
  860. } else {
  861. netif_carrier_off(bp->dev);
  862. netdev_err(bp->dev, "NIC %s Link is Down\n",
  863. bnx2_xceiver_str(bp));
  864. }
  865. bnx2_report_fw_link(bp);
  866. }
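/*
 * Resolve the pause configuration.  When both speed and flow control are
 * autonegotiated, apply the pause resolution rules of IEEE 802.3
 * (Table 28B-3); otherwise use the requested flow control on full duplex.
 */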
  867. static void
  868. bnx2_resolve_flow_ctrl(struct bnx2 *bp)
  869. {
  870. u32 local_adv, remote_adv;
  871. bp->flow_ctrl = 0;
  872. if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
  873. (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
  874. if (bp->duplex == DUPLEX_FULL) {
  875. bp->flow_ctrl = bp->req_flow_ctrl;
  876. }
  877. return;
  878. }
  879. if (bp->duplex != DUPLEX_FULL) {
  880. return;
  881. }
  882. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  883. (CHIP_NUM(bp) == CHIP_NUM_5708)) {
  884. u32 val;
  885. bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
  886. if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
  887. bp->flow_ctrl |= FLOW_CTRL_TX;
  888. if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
  889. bp->flow_ctrl |= FLOW_CTRL_RX;
  890. return;
  891. }
  892. bnx2_read_phy(bp, bp->mii_adv, &local_adv);
  893. bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
  894. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  895. u32 new_local_adv = 0;
  896. u32 new_remote_adv = 0;
  897. if (local_adv & ADVERTISE_1000XPAUSE)
  898. new_local_adv |= ADVERTISE_PAUSE_CAP;
  899. if (local_adv & ADVERTISE_1000XPSE_ASYM)
  900. new_local_adv |= ADVERTISE_PAUSE_ASYM;
  901. if (remote_adv & ADVERTISE_1000XPAUSE)
  902. new_remote_adv |= ADVERTISE_PAUSE_CAP;
  903. if (remote_adv & ADVERTISE_1000XPSE_ASYM)
  904. new_remote_adv |= ADVERTISE_PAUSE_ASYM;
  905. local_adv = new_local_adv;
  906. remote_adv = new_remote_adv;
  907. }
  908. /* See Table 28B-3 of 802.3ab-1999 spec. */
  909. if (local_adv & ADVERTISE_PAUSE_CAP) {
  910. if(local_adv & ADVERTISE_PAUSE_ASYM) {
  911. if (remote_adv & ADVERTISE_PAUSE_CAP) {
  912. bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
  913. }
  914. else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
  915. bp->flow_ctrl = FLOW_CTRL_RX;
  916. }
  917. }
  918. else {
  919. if (remote_adv & ADVERTISE_PAUSE_CAP) {
  920. bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
  921. }
  922. }
  923. }
  924. else if (local_adv & ADVERTISE_PAUSE_ASYM) {
  925. if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
  926. (remote_adv & ADVERTISE_PAUSE_ASYM)) {
  927. bp->flow_ctrl = FLOW_CTRL_TX;
  928. }
  929. }
  930. }
  931. static int
  932. bnx2_5709s_linkup(struct bnx2 *bp)
  933. {
  934. u32 val, speed;
  935. bp->link_up = 1;
  936. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
  937. bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
  938. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  939. if ((bp->autoneg & AUTONEG_SPEED) == 0) {
  940. bp->line_speed = bp->req_line_speed;
  941. bp->duplex = bp->req_duplex;
  942. return 0;
  943. }
  944. speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
  945. switch (speed) {
  946. case MII_BNX2_GP_TOP_AN_SPEED_10:
  947. bp->line_speed = SPEED_10;
  948. break;
  949. case MII_BNX2_GP_TOP_AN_SPEED_100:
  950. bp->line_speed = SPEED_100;
  951. break;
  952. case MII_BNX2_GP_TOP_AN_SPEED_1G:
  953. case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
  954. bp->line_speed = SPEED_1000;
  955. break;
  956. case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
  957. bp->line_speed = SPEED_2500;
  958. break;
  959. }
  960. if (val & MII_BNX2_GP_TOP_AN_FD)
  961. bp->duplex = DUPLEX_FULL;
  962. else
  963. bp->duplex = DUPLEX_HALF;
  964. return 0;
  965. }
  966. static int
  967. bnx2_5708s_linkup(struct bnx2 *bp)
  968. {
  969. u32 val;
  970. bp->link_up = 1;
  971. bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
  972. switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
  973. case BCM5708S_1000X_STAT1_SPEED_10:
  974. bp->line_speed = SPEED_10;
  975. break;
  976. case BCM5708S_1000X_STAT1_SPEED_100:
  977. bp->line_speed = SPEED_100;
  978. break;
  979. case BCM5708S_1000X_STAT1_SPEED_1G:
  980. bp->line_speed = SPEED_1000;
  981. break;
  982. case BCM5708S_1000X_STAT1_SPEED_2G5:
  983. bp->line_speed = SPEED_2500;
  984. break;
  985. }
  986. if (val & BCM5708S_1000X_STAT1_FD)
  987. bp->duplex = DUPLEX_FULL;
  988. else
  989. bp->duplex = DUPLEX_HALF;
  990. return 0;
  991. }
  992. static int
  993. bnx2_5706s_linkup(struct bnx2 *bp)
  994. {
  995. u32 bmcr, local_adv, remote_adv, common;
  996. bp->link_up = 1;
  997. bp->line_speed = SPEED_1000;
  998. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  999. if (bmcr & BMCR_FULLDPLX) {
  1000. bp->duplex = DUPLEX_FULL;
  1001. }
  1002. else {
  1003. bp->duplex = DUPLEX_HALF;
  1004. }
  1005. if (!(bmcr & BMCR_ANENABLE)) {
  1006. return 0;
  1007. }
  1008. bnx2_read_phy(bp, bp->mii_adv, &local_adv);
  1009. bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
  1010. common = local_adv & remote_adv;
  1011. if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
  1012. if (common & ADVERTISE_1000XFULL) {
  1013. bp->duplex = DUPLEX_FULL;
  1014. }
  1015. else {
  1016. bp->duplex = DUPLEX_HALF;
  1017. }
  1018. }
  1019. return 0;
  1020. }
  1021. static int
  1022. bnx2_copper_linkup(struct bnx2 *bp)
  1023. {
  1024. u32 bmcr;
  1025. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1026. if (bmcr & BMCR_ANENABLE) {
  1027. u32 local_adv, remote_adv, common;
  1028. bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
  1029. bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
  1030. common = local_adv & (remote_adv >> 2);
  1031. if (common & ADVERTISE_1000FULL) {
  1032. bp->line_speed = SPEED_1000;
  1033. bp->duplex = DUPLEX_FULL;
  1034. }
  1035. else if (common & ADVERTISE_1000HALF) {
  1036. bp->line_speed = SPEED_1000;
  1037. bp->duplex = DUPLEX_HALF;
  1038. }
  1039. else {
  1040. bnx2_read_phy(bp, bp->mii_adv, &local_adv);
  1041. bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
  1042. common = local_adv & remote_adv;
  1043. if (common & ADVERTISE_100FULL) {
  1044. bp->line_speed = SPEED_100;
  1045. bp->duplex = DUPLEX_FULL;
  1046. }
  1047. else if (common & ADVERTISE_100HALF) {
  1048. bp->line_speed = SPEED_100;
  1049. bp->duplex = DUPLEX_HALF;
  1050. }
  1051. else if (common & ADVERTISE_10FULL) {
  1052. bp->line_speed = SPEED_10;
  1053. bp->duplex = DUPLEX_FULL;
  1054. }
  1055. else if (common & ADVERTISE_10HALF) {
  1056. bp->line_speed = SPEED_10;
  1057. bp->duplex = DUPLEX_HALF;
  1058. }
  1059. else {
  1060. bp->line_speed = 0;
  1061. bp->link_up = 0;
  1062. }
  1063. }
  1064. }
  1065. else {
  1066. if (bmcr & BMCR_SPEED100) {
  1067. bp->line_speed = SPEED_100;
  1068. }
  1069. else {
  1070. bp->line_speed = SPEED_10;
  1071. }
  1072. if (bmcr & BMCR_FULLDPLX) {
  1073. bp->duplex = DUPLEX_FULL;
  1074. }
  1075. else {
  1076. bp->duplex = DUPLEX_HALF;
  1077. }
  1078. }
  1079. return 0;
  1080. }
  1081. static void
  1082. bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
  1083. {
  1084. u32 val, rx_cid_addr = GET_CID_ADDR(cid);
  1085. val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
  1086. val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
  1087. val |= 0x02 << 8;
  1088. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  1089. u32 lo_water, hi_water;
  1090. if (bp->flow_ctrl & FLOW_CTRL_TX)
  1091. lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
  1092. else
  1093. lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
  1094. if (lo_water >= bp->rx_ring_size)
  1095. lo_water = 0;
  1096. hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
  1097. if (hi_water <= lo_water)
  1098. lo_water = 0;
  1099. hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
  1100. lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
  1101. if (hi_water > 0xf)
  1102. hi_water = 0xf;
  1103. else if (hi_water == 0)
  1104. lo_water = 0;
  1105. val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
  1106. }
  1107. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
  1108. }
  1109. static void
  1110. bnx2_init_all_rx_contexts(struct bnx2 *bp)
  1111. {
  1112. int i;
  1113. u32 cid;
  1114. for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
  1115. if (i == 1)
  1116. cid = RX_RSS_CID;
  1117. bnx2_init_rx_context(bp, cid);
  1118. }
  1119. }
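/*
 * Program the EMAC to match the resolved link: port mode (MII/GMII/2.5G),
 * duplex, and RX/TX PAUSE enables, then acknowledge the link-change
 * interrupt.
 */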
  1120. static void
  1121. bnx2_set_mac_link(struct bnx2 *bp)
  1122. {
  1123. u32 val;
  1124. REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
  1125. if (bp->link_up && (bp->line_speed == SPEED_1000) &&
  1126. (bp->duplex == DUPLEX_HALF)) {
  1127. REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
  1128. }
  1129. /* Configure the EMAC mode register. */
  1130. val = REG_RD(bp, BNX2_EMAC_MODE);
  1131. val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
  1132. BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
  1133. BNX2_EMAC_MODE_25G_MODE);
  1134. if (bp->link_up) {
  1135. switch (bp->line_speed) {
  1136. case SPEED_10:
  1137. if (CHIP_NUM(bp) != CHIP_NUM_5706) {
  1138. val |= BNX2_EMAC_MODE_PORT_MII_10M;
  1139. break;
  1140. }
  1141. /* fall through */
  1142. case SPEED_100:
  1143. val |= BNX2_EMAC_MODE_PORT_MII;
  1144. break;
  1145. case SPEED_2500:
  1146. val |= BNX2_EMAC_MODE_25G_MODE;
  1147. /* fall through */
  1148. case SPEED_1000:
  1149. val |= BNX2_EMAC_MODE_PORT_GMII;
  1150. break;
  1151. }
  1152. }
  1153. else {
  1154. val |= BNX2_EMAC_MODE_PORT_GMII;
  1155. }
  1156. /* Set the MAC to operate in the appropriate duplex mode. */
  1157. if (bp->duplex == DUPLEX_HALF)
  1158. val |= BNX2_EMAC_MODE_HALF_DUPLEX;
  1159. REG_WR(bp, BNX2_EMAC_MODE, val);
  1160. /* Enable/disable rx PAUSE. */
  1161. bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
  1162. if (bp->flow_ctrl & FLOW_CTRL_RX)
  1163. bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
  1164. REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
  1165. /* Enable/disable tx PAUSE. */
  1166. val = REG_RD(bp, BNX2_EMAC_TX_MODE);
  1167. val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
  1168. if (bp->flow_ctrl & FLOW_CTRL_TX)
  1169. val |= BNX2_EMAC_TX_MODE_FLOW_EN;
  1170. REG_WR(bp, BNX2_EMAC_TX_MODE, val);
  1171. /* Acknowledge the interrupt. */
  1172. REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
  1173. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  1174. bnx2_init_all_rx_contexts(bp);
  1175. }
  1176. static void
  1177. bnx2_enable_bmsr1(struct bnx2 *bp)
  1178. {
  1179. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  1180. (CHIP_NUM(bp) == CHIP_NUM_5709))
  1181. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1182. MII_BNX2_BLK_ADDR_GP_STATUS);
  1183. }
  1184. static void
  1185. bnx2_disable_bmsr1(struct bnx2 *bp)
  1186. {
  1187. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  1188. (CHIP_NUM(bp) == CHIP_NUM_5709))
  1189. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1190. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1191. }
  1192. static int
  1193. bnx2_test_and_enable_2g5(struct bnx2 *bp)
  1194. {
  1195. u32 up1;
  1196. int ret = 1;
  1197. if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  1198. return 0;
  1199. if (bp->autoneg & AUTONEG_SPEED)
  1200. bp->advertising |= ADVERTISED_2500baseX_Full;
  1201. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  1202. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
  1203. bnx2_read_phy(bp, bp->mii_up1, &up1);
  1204. if (!(up1 & BCM5708S_UP1_2G5)) {
  1205. up1 |= BCM5708S_UP1_2G5;
  1206. bnx2_write_phy(bp, bp->mii_up1, up1);
  1207. ret = 0;
  1208. }
  1209. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  1210. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1211. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1212. return ret;
  1213. }
  1214. static int
  1215. bnx2_test_and_disable_2g5(struct bnx2 *bp)
  1216. {
  1217. u32 up1;
  1218. int ret = 0;
  1219. if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  1220. return 0;
  1221. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  1222. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
  1223. bnx2_read_phy(bp, bp->mii_up1, &up1);
  1224. if (up1 & BCM5708S_UP1_2G5) {
  1225. up1 &= ~BCM5708S_UP1_2G5;
  1226. bnx2_write_phy(bp, bp->mii_up1, up1);
  1227. ret = 1;
  1228. }
  1229. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  1230. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1231. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1232. return ret;
  1233. }
  1234. static void
  1235. bnx2_enable_forced_2g5(struct bnx2 *bp)
  1236. {
  1237. u32 uninitialized_var(bmcr);
  1238. int err;
  1239. if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  1240. return;
  1241. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  1242. u32 val;
  1243. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1244. MII_BNX2_BLK_ADDR_SERDES_DIG);
  1245. if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
  1246. val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
  1247. val |= MII_BNX2_SD_MISC1_FORCE |
  1248. MII_BNX2_SD_MISC1_FORCE_2_5G;
  1249. bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
  1250. }
  1251. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1252. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1253. err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1254. } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
  1255. err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1256. if (!err)
  1257. bmcr |= BCM5708S_BMCR_FORCE_2500;
  1258. } else {
  1259. return;
  1260. }
  1261. if (err)
  1262. return;
  1263. if (bp->autoneg & AUTONEG_SPEED) {
  1264. bmcr &= ~BMCR_ANENABLE;
  1265. if (bp->req_duplex == DUPLEX_FULL)
  1266. bmcr |= BMCR_FULLDPLX;
  1267. }
  1268. bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
  1269. }
  1270. static void
  1271. bnx2_disable_forced_2g5(struct bnx2 *bp)
  1272. {
  1273. u32 uninitialized_var(bmcr);
  1274. int err;
  1275. if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  1276. return;
  1277. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  1278. u32 val;
  1279. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1280. MII_BNX2_BLK_ADDR_SERDES_DIG);
  1281. if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
  1282. val &= ~MII_BNX2_SD_MISC1_FORCE;
  1283. bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
  1284. }
  1285. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1286. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1287. err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1288. } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
  1289. err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1290. if (!err)
  1291. bmcr &= ~BCM5708S_BMCR_FORCE_2500;
  1292. } else {
  1293. return;
  1294. }
  1295. if (err)
  1296. return;
  1297. if (bp->autoneg & AUTONEG_SPEED)
  1298. bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
  1299. bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
  1300. }
  1301. static void
  1302. bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
  1303. {
  1304. u32 val;
  1305. bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
  1306. bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
  1307. if (start)
  1308. bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
  1309. else
  1310. bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
  1311. }
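/*
 * Re-evaluate the link.  Reads the (latched) BMSR twice for the current
 * status, calls the chip-specific link-up handler, resolves flow control
 * and reports any change.  Called under bp->phy_lock (see bnx2_phy_int()).
 */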
  1312. static int
  1313. bnx2_set_link(struct bnx2 *bp)
  1314. {
  1315. u32 bmsr;
  1316. u8 link_up;
  1317. if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
  1318. bp->link_up = 1;
  1319. return 0;
  1320. }
  1321. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  1322. return 0;
  1323. link_up = bp->link_up;
  1324. bnx2_enable_bmsr1(bp);
  1325. bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
  1326. bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
  1327. bnx2_disable_bmsr1(bp);
  1328. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  1329. (CHIP_NUM(bp) == CHIP_NUM_5706)) {
  1330. u32 val, an_dbg;
  1331. if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
  1332. bnx2_5706s_force_link_dn(bp, 0);
  1333. bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
  1334. }
  1335. val = REG_RD(bp, BNX2_EMAC_STATUS);
  1336. bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
  1337. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
  1338. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
  1339. if ((val & BNX2_EMAC_STATUS_LINK) &&
  1340. !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
  1341. bmsr |= BMSR_LSTATUS;
  1342. else
  1343. bmsr &= ~BMSR_LSTATUS;
  1344. }
  1345. if (bmsr & BMSR_LSTATUS) {
  1346. bp->link_up = 1;
  1347. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1348. if (CHIP_NUM(bp) == CHIP_NUM_5706)
  1349. bnx2_5706s_linkup(bp);
  1350. else if (CHIP_NUM(bp) == CHIP_NUM_5708)
  1351. bnx2_5708s_linkup(bp);
  1352. else if (CHIP_NUM(bp) == CHIP_NUM_5709)
  1353. bnx2_5709s_linkup(bp);
  1354. }
  1355. else {
  1356. bnx2_copper_linkup(bp);
  1357. }
  1358. bnx2_resolve_flow_ctrl(bp);
  1359. }
  1360. else {
  1361. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  1362. (bp->autoneg & AUTONEG_SPEED))
  1363. bnx2_disable_forced_2g5(bp);
  1364. if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
  1365. u32 bmcr;
  1366. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1367. bmcr |= BMCR_ANENABLE;
  1368. bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
  1369. bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
  1370. }
  1371. bp->link_up = 0;
  1372. }
  1373. if (bp->link_up != link_up) {
  1374. bnx2_report_link(bp);
  1375. }
  1376. bnx2_set_mac_link(bp);
  1377. return 0;
  1378. }
  1379. static int
  1380. bnx2_reset_phy(struct bnx2 *bp)
  1381. {
  1382. int i;
  1383. u32 reg;
  1384. bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
  1385. #define PHY_RESET_MAX_WAIT 100
  1386. for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
  1387. udelay(10);
  1388. bnx2_read_phy(bp, bp->mii_bmcr, &reg);
  1389. if (!(reg & BMCR_RESET)) {
  1390. udelay(20);
  1391. break;
  1392. }
  1393. }
  1394. if (i == PHY_RESET_MAX_WAIT) {
  1395. return -EBUSY;
  1396. }
  1397. return 0;
  1398. }
  1399. static u32
  1400. bnx2_phy_get_pause_adv(struct bnx2 *bp)
  1401. {
  1402. u32 adv = 0;
  1403. if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
  1404. (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
  1405. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1406. adv = ADVERTISE_1000XPAUSE;
  1407. }
  1408. else {
  1409. adv = ADVERTISE_PAUSE_CAP;
  1410. }
  1411. }
  1412. else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
  1413. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1414. adv = ADVERTISE_1000XPSE_ASYM;
  1415. }
  1416. else {
  1417. adv = ADVERTISE_PAUSE_ASYM;
  1418. }
  1419. }
  1420. else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
  1421. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1422. adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
  1423. }
  1424. else {
  1425. adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  1426. }
  1427. }
  1428. return adv;
  1429. }
  1430. static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
  1431. static int
  1432. bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
  1433. __releases(&bp->phy_lock)
  1434. __acquires(&bp->phy_lock)
  1435. {
  1436. u32 speed_arg = 0, pause_adv;
  1437. pause_adv = bnx2_phy_get_pause_adv(bp);
  1438. if (bp->autoneg & AUTONEG_SPEED) {
  1439. speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
  1440. if (bp->advertising & ADVERTISED_10baseT_Half)
  1441. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
  1442. if (bp->advertising & ADVERTISED_10baseT_Full)
  1443. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
  1444. if (bp->advertising & ADVERTISED_100baseT_Half)
  1445. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
  1446. if (bp->advertising & ADVERTISED_100baseT_Full)
  1447. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
  1448. if (bp->advertising & ADVERTISED_1000baseT_Full)
  1449. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
  1450. if (bp->advertising & ADVERTISED_2500baseX_Full)
  1451. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
  1452. } else {
  1453. if (bp->req_line_speed == SPEED_2500)
  1454. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
  1455. else if (bp->req_line_speed == SPEED_1000)
  1456. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
  1457. else if (bp->req_line_speed == SPEED_100) {
  1458. if (bp->req_duplex == DUPLEX_FULL)
  1459. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
  1460. else
  1461. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
  1462. } else if (bp->req_line_speed == SPEED_10) {
  1463. if (bp->req_duplex == DUPLEX_FULL)
  1464. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
  1465. else
  1466. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
  1467. }
  1468. }
  1469. if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
  1470. speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
  1471. if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
  1472. speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
  1473. if (port == PORT_TP)
  1474. speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
  1475. BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
  1476. bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
  1477. spin_unlock_bh(&bp->phy_lock);
  1478. bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
  1479. spin_lock_bh(&bp->phy_lock);
  1480. return 0;
  1481. }
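/*
 * Configure a SerDes PHY.  Forced-speed setups program BMCR/ADV directly,
 * forcing the link down first when the settings change; autoneg setups
 * write the new advertisement and restart autonegotiation.
 */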
  1482. static int
  1483. bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
  1484. __releases(&bp->phy_lock)
  1485. __acquires(&bp->phy_lock)
  1486. {
  1487. u32 adv, bmcr;
  1488. u32 new_adv = 0;
  1489. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  1490. return (bnx2_setup_remote_phy(bp, port));
  1491. if (!(bp->autoneg & AUTONEG_SPEED)) {
  1492. u32 new_bmcr;
  1493. int force_link_down = 0;
  1494. if (bp->req_line_speed == SPEED_2500) {
  1495. if (!bnx2_test_and_enable_2g5(bp))
  1496. force_link_down = 1;
  1497. } else if (bp->req_line_speed == SPEED_1000) {
  1498. if (bnx2_test_and_disable_2g5(bp))
  1499. force_link_down = 1;
  1500. }
  1501. bnx2_read_phy(bp, bp->mii_adv, &adv);
  1502. adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
  1503. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1504. new_bmcr = bmcr & ~BMCR_ANENABLE;
  1505. new_bmcr |= BMCR_SPEED1000;
  1506. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  1507. if (bp->req_line_speed == SPEED_2500)
  1508. bnx2_enable_forced_2g5(bp);
  1509. else if (bp->req_line_speed == SPEED_1000) {
  1510. bnx2_disable_forced_2g5(bp);
  1511. new_bmcr &= ~0x2000;
  1512. }
  1513. } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
  1514. if (bp->req_line_speed == SPEED_2500)
  1515. new_bmcr |= BCM5708S_BMCR_FORCE_2500;
  1516. else
  1517. new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
  1518. }
  1519. if (bp->req_duplex == DUPLEX_FULL) {
  1520. adv |= ADVERTISE_1000XFULL;
  1521. new_bmcr |= BMCR_FULLDPLX;
  1522. }
  1523. else {
  1524. adv |= ADVERTISE_1000XHALF;
  1525. new_bmcr &= ~BMCR_FULLDPLX;
  1526. }
  1527. if ((new_bmcr != bmcr) || (force_link_down)) {
  1528. /* Force a link down visible on the other side */
  1529. if (bp->link_up) {
  1530. bnx2_write_phy(bp, bp->mii_adv, adv &
  1531. ~(ADVERTISE_1000XFULL |
  1532. ADVERTISE_1000XHALF));
  1533. bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
  1534. BMCR_ANRESTART | BMCR_ANENABLE);
  1535. bp->link_up = 0;
  1536. netif_carrier_off(bp->dev);
  1537. bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
  1538. bnx2_report_link(bp);
  1539. }
  1540. bnx2_write_phy(bp, bp->mii_adv, adv);
  1541. bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
  1542. } else {
  1543. bnx2_resolve_flow_ctrl(bp);
  1544. bnx2_set_mac_link(bp);
  1545. }
  1546. return 0;
  1547. }
  1548. bnx2_test_and_enable_2g5(bp);
  1549. if (bp->advertising & ADVERTISED_1000baseT_Full)
  1550. new_adv |= ADVERTISE_1000XFULL;
  1551. new_adv |= bnx2_phy_get_pause_adv(bp);
  1552. bnx2_read_phy(bp, bp->mii_adv, &adv);
  1553. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1554. bp->serdes_an_pending = 0;
  1555. if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
  1556. /* Force a link down visible on the other side */
  1557. if (bp->link_up) {
  1558. bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
  1559. spin_unlock_bh(&bp->phy_lock);
  1560. msleep(20);
  1561. spin_lock_bh(&bp->phy_lock);
  1562. }
  1563. bnx2_write_phy(bp, bp->mii_adv, new_adv);
  1564. bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
  1565. BMCR_ANENABLE);
  1566. /* Speed up link-up time when the link partner
1567. * does not autonegotiate, which is very common
1568. * in blade servers. Some blade servers use
1569. * IPMI for keyboard input and it's important
  1570. * to minimize link disruptions. Autoneg. involves
  1571. * exchanging base pages plus 3 next pages and
  1572. * normally completes in about 120 msec.
  1573. */
  1574. bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
  1575. bp->serdes_an_pending = 1;
  1576. mod_timer(&bp->timer, jiffies + bp->current_interval);
  1577. } else {
  1578. bnx2_resolve_flow_ctrl(bp);
  1579. bnx2_set_mac_link(bp);
  1580. }
  1581. return 0;
  1582. }
  1583. #define ETHTOOL_ALL_FIBRE_SPEED \
  1584. (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
  1585. (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
  1586. (ADVERTISED_1000baseT_Full)
  1587. #define ETHTOOL_ALL_COPPER_SPEED \
  1588. (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
  1589. ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
  1590. ADVERTISED_1000baseT_Full)
  1591. #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
  1592. ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
  1593. #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
  1594. static void
  1595. bnx2_set_default_remote_link(struct bnx2 *bp)
  1596. {
  1597. u32 link;
  1598. if (bp->phy_port == PORT_TP)
  1599. link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
  1600. else
  1601. link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
  1602. if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
  1603. bp->req_line_speed = 0;
  1604. bp->autoneg |= AUTONEG_SPEED;
  1605. bp->advertising = ADVERTISED_Autoneg;
  1606. if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
  1607. bp->advertising |= ADVERTISED_10baseT_Half;
  1608. if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
  1609. bp->advertising |= ADVERTISED_10baseT_Full;
  1610. if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
  1611. bp->advertising |= ADVERTISED_100baseT_Half;
  1612. if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
  1613. bp->advertising |= ADVERTISED_100baseT_Full;
  1614. if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
  1615. bp->advertising |= ADVERTISED_1000baseT_Full;
  1616. if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
  1617. bp->advertising |= ADVERTISED_2500baseX_Full;
  1618. } else {
  1619. bp->autoneg = 0;
  1620. bp->advertising = 0;
  1621. bp->req_duplex = DUPLEX_FULL;
  1622. if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
  1623. bp->req_line_speed = SPEED_10;
  1624. if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
  1625. bp->req_duplex = DUPLEX_HALF;
  1626. }
  1627. if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
  1628. bp->req_line_speed = SPEED_100;
  1629. if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
  1630. bp->req_duplex = DUPLEX_HALF;
  1631. }
  1632. if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
  1633. bp->req_line_speed = SPEED_1000;
  1634. if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
  1635. bp->req_line_speed = SPEED_2500;
  1636. }
  1637. }
  1638. static void
  1639. bnx2_set_default_link(struct bnx2 *bp)
  1640. {
  1641. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
  1642. bnx2_set_default_remote_link(bp);
  1643. return;
  1644. }
  1645. bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
  1646. bp->req_line_speed = 0;
  1647. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1648. u32 reg;
  1649. bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
  1650. reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
  1651. reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
  1652. if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
  1653. bp->autoneg = 0;
  1654. bp->req_line_speed = bp->line_speed = SPEED_1000;
  1655. bp->req_duplex = DUPLEX_FULL;
  1656. }
  1657. } else
  1658. bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
  1659. }
  1660. static void
  1661. bnx2_send_heart_beat(struct bnx2 *bp)
  1662. {
  1663. u32 msg;
  1664. u32 addr;
  1665. spin_lock(&bp->indirect_lock);
  1666. msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
  1667. addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
  1668. REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
  1669. REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
  1670. spin_unlock(&bp->indirect_lock);
  1671. }
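/*
 * Handle a link event reported by the remote PHY through shared memory:
 * decode speed, duplex and flow control from the BNX2_LINK_STATUS word
 * and update the MAC accordingly.
 */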
  1672. static void
  1673. bnx2_remote_phy_event(struct bnx2 *bp)
  1674. {
  1675. u32 msg;
  1676. u8 link_up = bp->link_up;
  1677. u8 old_port;
  1678. msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
  1679. if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
  1680. bnx2_send_heart_beat(bp);
  1681. msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
  1682. if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
  1683. bp->link_up = 0;
  1684. else {
  1685. u32 speed;
  1686. bp->link_up = 1;
  1687. speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
  1688. bp->duplex = DUPLEX_FULL;
  1689. switch (speed) {
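/* Each *HALF case falls through to the matching *FULL case below,
 * which sets the line speed for both duplex settings. */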
  1690. case BNX2_LINK_STATUS_10HALF:
  1691. bp->duplex = DUPLEX_HALF;
  1692. case BNX2_LINK_STATUS_10FULL:
  1693. bp->line_speed = SPEED_10;
  1694. break;
  1695. case BNX2_LINK_STATUS_100HALF:
  1696. bp->duplex = DUPLEX_HALF;
  1697. case BNX2_LINK_STATUS_100BASE_T4:
  1698. case BNX2_LINK_STATUS_100FULL:
  1699. bp->line_speed = SPEED_100;
  1700. break;
  1701. case BNX2_LINK_STATUS_1000HALF:
  1702. bp->duplex = DUPLEX_HALF;
  1703. case BNX2_LINK_STATUS_1000FULL:
  1704. bp->line_speed = SPEED_1000;
  1705. break;
  1706. case BNX2_LINK_STATUS_2500HALF:
  1707. bp->duplex = DUPLEX_HALF;
  1708. case BNX2_LINK_STATUS_2500FULL:
  1709. bp->line_speed = SPEED_2500;
  1710. break;
  1711. default:
  1712. bp->line_speed = 0;
  1713. break;
  1714. }
  1715. bp->flow_ctrl = 0;
  1716. if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
  1717. (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
  1718. if (bp->duplex == DUPLEX_FULL)
  1719. bp->flow_ctrl = bp->req_flow_ctrl;
  1720. } else {
  1721. if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
  1722. bp->flow_ctrl |= FLOW_CTRL_TX;
  1723. if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
  1724. bp->flow_ctrl |= FLOW_CTRL_RX;
  1725. }
  1726. old_port = bp->phy_port;
  1727. if (msg & BNX2_LINK_STATUS_SERDES_LINK)
  1728. bp->phy_port = PORT_FIBRE;
  1729. else
  1730. bp->phy_port = PORT_TP;
  1731. if (old_port != bp->phy_port)
  1732. bnx2_set_default_link(bp);
  1733. }
  1734. if (bp->link_up != link_up)
  1735. bnx2_report_link(bp);
  1736. bnx2_set_mac_link(bp);
  1737. }
  1738. static int
  1739. bnx2_set_remote_link(struct bnx2 *bp)
  1740. {
  1741. u32 evt_code;
  1742. evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
  1743. switch (evt_code) {
  1744. case BNX2_FW_EVT_CODE_LINK_EVENT:
  1745. bnx2_remote_phy_event(bp);
  1746. break;
  1747. case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
  1748. default:
  1749. bnx2_send_heart_beat(bp);
  1750. break;
  1751. }
  1752. return 0;
  1753. }
  1754. static int
  1755. bnx2_setup_copper_phy(struct bnx2 *bp)
  1756. __releases(&bp->phy_lock)
  1757. __acquires(&bp->phy_lock)
  1758. {
  1759. u32 bmcr;
  1760. u32 new_bmcr;
  1761. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1762. if (bp->autoneg & AUTONEG_SPEED) {
  1763. u32 adv_reg, adv1000_reg;
  1764. u32 new_adv_reg = 0;
  1765. u32 new_adv1000_reg = 0;
  1766. bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
  1767. adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
  1768. ADVERTISE_PAUSE_ASYM);
  1769. bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
  1770. adv1000_reg &= PHY_ALL_1000_SPEED;
  1771. if (bp->advertising & ADVERTISED_10baseT_Half)
  1772. new_adv_reg |= ADVERTISE_10HALF;
  1773. if (bp->advertising & ADVERTISED_10baseT_Full)
  1774. new_adv_reg |= ADVERTISE_10FULL;
  1775. if (bp->advertising & ADVERTISED_100baseT_Half)
  1776. new_adv_reg |= ADVERTISE_100HALF;
  1777. if (bp->advertising & ADVERTISED_100baseT_Full)
  1778. new_adv_reg |= ADVERTISE_100FULL;
  1779. if (bp->advertising & ADVERTISED_1000baseT_Full)
  1780. new_adv1000_reg |= ADVERTISE_1000FULL;
  1781. new_adv_reg |= ADVERTISE_CSMA;
  1782. new_adv_reg |= bnx2_phy_get_pause_adv(bp);
  1783. if ((adv1000_reg != new_adv1000_reg) ||
  1784. (adv_reg != new_adv_reg) ||
  1785. ((bmcr & BMCR_ANENABLE) == 0)) {
  1786. bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
  1787. bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
  1788. bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
  1789. BMCR_ANENABLE);
  1790. }
  1791. else if (bp->link_up) {
  1792. /* Flow ctrl may have changed from auto to forced */
  1793. /* or vice-versa. */
  1794. bnx2_resolve_flow_ctrl(bp);
  1795. bnx2_set_mac_link(bp);
  1796. }
  1797. return 0;
  1798. }
  1799. new_bmcr = 0;
  1800. if (bp->req_line_speed == SPEED_100) {
  1801. new_bmcr |= BMCR_SPEED100;
  1802. }
  1803. if (bp->req_duplex == DUPLEX_FULL) {
  1804. new_bmcr |= BMCR_FULLDPLX;
  1805. }
  1806. if (new_bmcr != bmcr) {
  1807. u32 bmsr;
  1808. bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
  1809. bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
  1810. if (bmsr & BMSR_LSTATUS) {
  1811. /* Force link down */
  1812. bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
  1813. spin_unlock_bh(&bp->phy_lock);
  1814. msleep(50);
  1815. spin_lock_bh(&bp->phy_lock);
  1816. bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
  1817. bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
  1818. }
  1819. bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1820. /* Normally, the new speed is set up after the link has
1821. * gone down and up again. In some cases, the link will not go
1822. * down, so we need to set up the new speed here.
  1823. */
  1824. if (bmsr & BMSR_LSTATUS) {
  1825. bp->line_speed = bp->req_line_speed;
  1826. bp->duplex = bp->req_duplex;
  1827. bnx2_resolve_flow_ctrl(bp);
  1828. bnx2_set_mac_link(bp);
  1829. }
  1830. } else {
  1831. bnx2_resolve_flow_ctrl(bp);
  1832. bnx2_set_mac_link(bp);
  1833. }
  1834. return 0;
  1835. }
  1836. static int
  1837. bnx2_setup_phy(struct bnx2 *bp, u8 port)
  1838. __releases(&bp->phy_lock)
  1839. __acquires(&bp->phy_lock)
  1840. {
  1841. if (bp->loopback == MAC_LOOPBACK)
  1842. return 0;
  1843. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1844. return (bnx2_setup_serdes_phy(bp, port));
  1845. }
  1846. else {
  1847. return (bnx2_setup_copper_phy(bp));
  1848. }
  1849. }
  1850. static int
  1851. bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
  1852. {
  1853. u32 val;
  1854. bp->mii_bmcr = MII_BMCR + 0x10;
  1855. bp->mii_bmsr = MII_BMSR + 0x10;
  1856. bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
  1857. bp->mii_adv = MII_ADVERTISE + 0x10;
  1858. bp->mii_lpa = MII_LPA + 0x10;
  1859. bp->mii_up1 = MII_BNX2_OVER1G_UP1;
  1860. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
  1861. bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
  1862. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1863. if (reset_phy)
  1864. bnx2_reset_phy(bp);
  1865. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
  1866. bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
  1867. val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
  1868. val |= MII_BNX2_SD_1000XCTL1_FIBER;
  1869. bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
  1870. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
  1871. bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
  1872. if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
  1873. val |= BCM5708S_UP1_2G5;
  1874. else
  1875. val &= ~BCM5708S_UP1_2G5;
  1876. bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
  1877. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
  1878. bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
  1879. val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
  1880. bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
  1881. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
  1882. val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
  1883. MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
  1884. bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
  1885. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1886. return 0;
  1887. }
  1888. static int
  1889. bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
  1890. {
  1891. u32 val;
  1892. if (reset_phy)
  1893. bnx2_reset_phy(bp);
  1894. bp->mii_up1 = BCM5708S_UP1;
  1895. bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
  1896. bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
  1897. bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
  1898. bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
  1899. val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
  1900. bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
  1901. bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
  1902. val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
  1903. bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
  1904. if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
  1905. bnx2_read_phy(bp, BCM5708S_UP1, &val);
  1906. val |= BCM5708S_UP1_2G5;
  1907. bnx2_write_phy(bp, BCM5708S_UP1, val);
  1908. }
  1909. if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
  1910. (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
  1911. (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
  1912. /* increase tx signal amplitude */
  1913. bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
  1914. BCM5708S_BLK_ADDR_TX_MISC);
  1915. bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
  1916. val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
  1917. bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
  1918. bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
  1919. }
  1920. val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
  1921. BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
  1922. if (val) {
  1923. u32 is_backplane;
  1924. is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
  1925. if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
  1926. bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
  1927. BCM5708S_BLK_ADDR_TX_MISC);
  1928. bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
  1929. bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
  1930. BCM5708S_BLK_ADDR_DIG);
  1931. }
  1932. }
  1933. return 0;
  1934. }
  1935. static int
  1936. bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
  1937. {
  1938. if (reset_phy)
  1939. bnx2_reset_phy(bp);
  1940. bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
  1941. if (CHIP_NUM(bp) == CHIP_NUM_5706)
  1942. REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
  1943. if (bp->dev->mtu > 1500) {
  1944. u32 val;
  1945. /* Set extended packet length bit */
  1946. bnx2_write_phy(bp, 0x18, 0x7);
  1947. bnx2_read_phy(bp, 0x18, &val);
  1948. bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
  1949. bnx2_write_phy(bp, 0x1c, 0x6c00);
  1950. bnx2_read_phy(bp, 0x1c, &val);
  1951. bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
  1952. }
  1953. else {
  1954. u32 val;
  1955. bnx2_write_phy(bp, 0x18, 0x7);
  1956. bnx2_read_phy(bp, 0x18, &val);
  1957. bnx2_write_phy(bp, 0x18, val & ~0x4007);
  1958. bnx2_write_phy(bp, 0x1c, 0x6c00);
  1959. bnx2_read_phy(bp, 0x1c, &val);
  1960. bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
  1961. }
  1962. return 0;
  1963. }
  1964. static int
  1965. bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
  1966. {
  1967. u32 val;
  1968. if (reset_phy)
  1969. bnx2_reset_phy(bp);
  1970. if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
  1971. bnx2_write_phy(bp, 0x18, 0x0c00);
  1972. bnx2_write_phy(bp, 0x17, 0x000a);
  1973. bnx2_write_phy(bp, 0x15, 0x310b);
  1974. bnx2_write_phy(bp, 0x17, 0x201f);
  1975. bnx2_write_phy(bp, 0x15, 0x9506);
  1976. bnx2_write_phy(bp, 0x17, 0x401f);
  1977. bnx2_write_phy(bp, 0x15, 0x14e2);
  1978. bnx2_write_phy(bp, 0x18, 0x0400);
  1979. }
  1980. if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
  1981. bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
  1982. MII_BNX2_DSP_EXPAND_REG | 0x8);
  1983. bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
  1984. val &= ~(1 << 8);
  1985. bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
  1986. }
  1987. if (bp->dev->mtu > 1500) {
  1988. /* Set extended packet length bit */
  1989. bnx2_write_phy(bp, 0x18, 0x7);
  1990. bnx2_read_phy(bp, 0x18, &val);
  1991. bnx2_write_phy(bp, 0x18, val | 0x4000);
  1992. bnx2_read_phy(bp, 0x10, &val);
  1993. bnx2_write_phy(bp, 0x10, val | 0x1);
  1994. }
  1995. else {
  1996. bnx2_write_phy(bp, 0x18, 0x7);
  1997. bnx2_read_phy(bp, 0x18, &val);
  1998. bnx2_write_phy(bp, 0x18, val & ~0x4007);
  1999. bnx2_read_phy(bp, 0x10, &val);
  2000. bnx2_write_phy(bp, 0x10, val & ~0x1);
  2001. }
  2002. /* ethernet@wirespeed */
  2003. bnx2_write_phy(bp, 0x18, 0x7007);
  2004. bnx2_read_phy(bp, 0x18, &val);
  2005. bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
  2006. return 0;
  2007. }
  2008. static int
  2009. bnx2_init_phy(struct bnx2 *bp, int reset_phy)
  2010. __releases(&bp->phy_lock)
  2011. __acquires(&bp->phy_lock)
  2012. {
  2013. u32 val;
  2014. int rc = 0;
  2015. bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
  2016. bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
  2017. bp->mii_bmcr = MII_BMCR;
  2018. bp->mii_bmsr = MII_BMSR;
  2019. bp->mii_bmsr1 = MII_BMSR;
  2020. bp->mii_adv = MII_ADVERTISE;
  2021. bp->mii_lpa = MII_LPA;
  2022. REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
  2023. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  2024. goto setup_phy;
  2025. bnx2_read_phy(bp, MII_PHYSID1, &val);
  2026. bp->phy_id = val << 16;
  2027. bnx2_read_phy(bp, MII_PHYSID2, &val);
  2028. bp->phy_id |= val & 0xffff;
  2029. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  2030. if (CHIP_NUM(bp) == CHIP_NUM_5706)
  2031. rc = bnx2_init_5706s_phy(bp, reset_phy);
  2032. else if (CHIP_NUM(bp) == CHIP_NUM_5708)
  2033. rc = bnx2_init_5708s_phy(bp, reset_phy);
  2034. else if (CHIP_NUM(bp) == CHIP_NUM_5709)
  2035. rc = bnx2_init_5709s_phy(bp, reset_phy);
  2036. }
  2037. else {
  2038. rc = bnx2_init_copper_phy(bp, reset_phy);
  2039. }
  2040. setup_phy:
  2041. if (!rc)
  2042. rc = bnx2_setup_phy(bp, bp->phy_port);
  2043. return rc;
  2044. }
  2045. static int
  2046. bnx2_set_mac_loopback(struct bnx2 *bp)
  2047. {
  2048. u32 mac_mode;
  2049. mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
  2050. mac_mode &= ~BNX2_EMAC_MODE_PORT;
  2051. mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
  2052. REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
  2053. bp->link_up = 1;
  2054. return 0;
  2055. }
  2056. static int bnx2_test_link(struct bnx2 *);
  2057. static int
  2058. bnx2_set_phy_loopback(struct bnx2 *bp)
  2059. {
  2060. u32 mac_mode;
  2061. int rc, i;
  2062. spin_lock_bh(&bp->phy_lock);
  2063. rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
  2064. BMCR_SPEED1000);
  2065. spin_unlock_bh(&bp->phy_lock);
  2066. if (rc)
  2067. return rc;
  2068. for (i = 0; i < 10; i++) {
  2069. if (bnx2_test_link(bp) == 0)
  2070. break;
  2071. msleep(100);
  2072. }
  2073. mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
  2074. mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
  2075. BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
  2076. BNX2_EMAC_MODE_25G_MODE);
  2077. mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
  2078. REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
  2079. bp->link_up = 1;
  2080. return 0;
  2081. }
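/*
 * Post a command to the bootcode through the driver mailbox and, if
 * requested, poll the firmware mailbox for the matching ACK sequence,
 * reporting a timeout back to the firmware if it never arrives.
 */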
  2082. static int
  2083. bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
  2084. {
  2085. int i;
  2086. u32 val;
  2087. bp->fw_wr_seq++;
  2088. msg_data |= bp->fw_wr_seq;
  2089. bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
  2090. if (!ack)
  2091. return 0;
  2092. /* wait for an acknowledgement. */
  2093. for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
  2094. msleep(10);
  2095. val = bnx2_shmem_rd(bp, BNX2_FW_MB);
  2096. if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
  2097. break;
  2098. }
  2099. if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
  2100. return 0;
  2101. /* If we timed out, inform the firmware that this is the case. */
  2102. if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
  2103. if (!silent)
  2104. pr_err("fw sync timeout, reset code = %x\n", msg_data);
  2105. msg_data &= ~BNX2_DRV_MSG_CODE;
  2106. msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
  2107. bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
  2108. return -EBUSY;
  2109. }
  2110. if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
  2111. return -EIO;
  2112. return 0;
  2113. }
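/*
 * Initialize the 5709 context memory: kick off the hardware MEM_INIT,
 * then zero each host context page and program it into the host page
 * table, polling for each write request to complete.
 */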
  2114. static int
  2115. bnx2_init_5709_context(struct bnx2 *bp)
  2116. {
  2117. int i, ret = 0;
  2118. u32 val;
  2119. val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
  2120. val |= (BCM_PAGE_BITS - 8) << 16;
  2121. REG_WR(bp, BNX2_CTX_COMMAND, val);
  2122. for (i = 0; i < 10; i++) {
  2123. val = REG_RD(bp, BNX2_CTX_COMMAND);
  2124. if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
  2125. break;
  2126. udelay(2);
  2127. }
  2128. if (val & BNX2_CTX_COMMAND_MEM_INIT)
  2129. return -EBUSY;
  2130. for (i = 0; i < bp->ctx_pages; i++) {
  2131. int j;
  2132. if (bp->ctx_blk[i])
  2133. memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
  2134. else
  2135. return -ENOMEM;
  2136. REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
  2137. (bp->ctx_blk_mapping[i] & 0xffffffff) |
  2138. BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
  2139. REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
  2140. (u64) bp->ctx_blk_mapping[i] >> 32);
  2141. REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
  2142. BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
  2143. for (j = 0; j < 10; j++) {
  2144. val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
  2145. if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
  2146. break;
  2147. udelay(5);
  2148. }
  2149. if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
  2150. ret = -EBUSY;
  2151. break;
  2152. }
  2153. }
  2154. return ret;
  2155. }
  2156. static void
  2157. bnx2_init_context(struct bnx2 *bp)
  2158. {
  2159. u32 vcid;
  2160. vcid = 96;
  2161. while (vcid) {
  2162. u32 vcid_addr, pcid_addr, offset;
  2163. int i;
  2164. vcid--;
  2165. if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
  2166. u32 new_vcid;
  2167. vcid_addr = GET_PCID_ADDR(vcid);
  2168. if (vcid & 0x8) {
  2169. new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
  2170. }
  2171. else {
  2172. new_vcid = vcid;
  2173. }
  2174. pcid_addr = GET_PCID_ADDR(new_vcid);
  2175. }
  2176. else {
  2177. vcid_addr = GET_CID_ADDR(vcid);
  2178. pcid_addr = vcid_addr;
  2179. }
  2180. for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
  2181. vcid_addr += (i << PHY_CTX_SHIFT);
  2182. pcid_addr += (i << PHY_CTX_SHIFT);
  2183. REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
  2184. REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
  2185. /* Zero out the context. */
  2186. for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
  2187. bnx2_ctx_wr(bp, vcid_addr, offset, 0);
  2188. }
  2189. }
  2190. }
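/*
 * Work around bad RX buffer memory: allocate every mbuf from the pool and
 * free back only the good ones, so the bad buffers (addresses with bit 9
 * set) stay permanently out of circulation.
 */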
  2191. static int
  2192. bnx2_alloc_bad_rbuf(struct bnx2 *bp)
  2193. {
  2194. u16 *good_mbuf;
  2195. u32 good_mbuf_cnt;
  2196. u32 val;
  2197. good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
  2198. if (good_mbuf == NULL) {
  2199. pr_err("Failed to allocate memory in %s\n", __func__);
  2200. return -ENOMEM;
  2201. }
  2202. REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
  2203. BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
  2204. good_mbuf_cnt = 0;
  2205. /* Allocate a bunch of mbufs and save the good ones in an array. */
  2206. val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
  2207. while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
  2208. bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
  2209. BNX2_RBUF_COMMAND_ALLOC_REQ);
  2210. val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
  2211. val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
  2212. /* The addresses with Bit 9 set are bad memory blocks. */
  2213. if (!(val & (1 << 9))) {
  2214. good_mbuf[good_mbuf_cnt] = (u16) val;
  2215. good_mbuf_cnt++;
  2216. }
  2217. val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
  2218. }
2219. /* Free the good ones back to the mbuf pool, thus discarding
2220. * all the bad ones. */
  2221. while (good_mbuf_cnt) {
  2222. good_mbuf_cnt--;
  2223. val = good_mbuf[good_mbuf_cnt];
  2224. val = (val << 9) | val | 1;
  2225. bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
  2226. }
  2227. kfree(good_mbuf);
  2228. return 0;
  2229. }
  2230. static void
  2231. bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
  2232. {
  2233. u32 val;
  2234. val = (mac_addr[0] << 8) | mac_addr[1];
  2235. REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
  2236. val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
  2237. (mac_addr[4] << 8) | mac_addr[5];
  2238. REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
  2239. }
  2240. static inline int
  2241. bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
  2242. {
  2243. dma_addr_t mapping;
  2244. struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
  2245. struct rx_bd *rxbd =
  2246. &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
  2247. struct page *page = alloc_page(gfp);
  2248. if (!page)
  2249. return -ENOMEM;
  2250. mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
  2251. PCI_DMA_FROMDEVICE);
  2252. if (dma_mapping_error(&bp->pdev->dev, mapping)) {
  2253. __free_page(page);
  2254. return -EIO;
  2255. }
  2256. rx_pg->page = page;
  2257. dma_unmap_addr_set(rx_pg, mapping, mapping);
  2258. rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
  2259. rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
  2260. return 0;
  2261. }
  2262. static void
  2263. bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
  2264. {
  2265. struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
  2266. struct page *page = rx_pg->page;
  2267. if (!page)
  2268. return;
  2269. dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
  2270. PAGE_SIZE, PCI_DMA_FROMDEVICE);
  2271. __free_page(page);
  2272. rx_pg->page = NULL;
  2273. }
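/*
 * Allocate and DMA-map a receive skb for the given ring index, aligning
 * skb->data to BNX2_RX_ALIGN and publishing the mapping in the rx_bd.
 */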
  2274. static inline int
  2275. bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
  2276. {
  2277. struct sk_buff *skb;
  2278. struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
  2279. dma_addr_t mapping;
  2280. struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
  2281. unsigned long align;
  2282. skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
  2283. if (skb == NULL) {
  2284. return -ENOMEM;
  2285. }
  2286. if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
  2287. skb_reserve(skb, BNX2_RX_ALIGN - align);
  2288. mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
  2289. PCI_DMA_FROMDEVICE);
  2290. if (dma_mapping_error(&bp->pdev->dev, mapping)) {
  2291. dev_kfree_skb(skb);
  2292. return -EIO;
  2293. }
  2294. rx_buf->skb = skb;
  2295. rx_buf->desc = (struct l2_fhdr *) skb->data;
  2296. dma_unmap_addr_set(rx_buf, mapping, mapping);
  2297. rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
  2298. rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
  2299. rxr->rx_prod_bseq += bp->rx_buf_use_size;
  2300. return 0;
  2301. }
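/* A PHY event is pending when an attention bit and its ack bit differ.
 * When they differ, the new state is acknowledged through the
 * status-bit set/clear commands and 1 is returned; otherwise 0.
 */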
  2302. static int
  2303. bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
  2304. {
  2305. struct status_block *sblk = bnapi->status_blk.msi;
  2306. u32 new_link_state, old_link_state;
  2307. int is_set = 1;
  2308. new_link_state = sblk->status_attn_bits & event;
  2309. old_link_state = sblk->status_attn_bits_ack & event;
  2310. if (new_link_state != old_link_state) {
  2311. if (new_link_state)
  2312. REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
  2313. else
  2314. REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
  2315. } else
  2316. is_set = 0;
  2317. return is_set;
  2318. }
  2319. static void
  2320. bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
  2321. {
  2322. spin_lock(&bp->phy_lock);
  2323. if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
  2324. bnx2_set_link(bp);
  2325. if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
  2326. bnx2_set_remote_link(bp);
  2327. spin_unlock(&bp->phy_lock);
  2328. }
  2329. static inline u16
  2330. bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
  2331. {
  2332. u16 cons;
  2333. /* Tell compiler that status block fields can change. */
  2334. barrier();
  2335. cons = *bnapi->hw_tx_cons_ptr;
  2336. barrier();
  2337. if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
  2338. cons++;
  2339. return cons;
  2340. }
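/* Reclaim completed tx buffers up to the hardware consumer index:
 * unmap the linear part and all fragments, free the skb, and wake the
 * tx queue if it was stopped and enough descriptors are free again.
 * For GSO packets, stop early if the hardware has only partially
 * completed the buffer descriptors of the current packet.
 */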
  2341. static int
  2342. bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
  2343. {
  2344. struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
  2345. u16 hw_cons, sw_cons, sw_ring_cons;
  2346. int tx_pkt = 0, index;
  2347. struct netdev_queue *txq;
  2348. index = (bnapi - bp->bnx2_napi);
  2349. txq = netdev_get_tx_queue(bp->dev, index);
  2350. hw_cons = bnx2_get_hw_tx_cons(bnapi);
  2351. sw_cons = txr->tx_cons;
  2352. while (sw_cons != hw_cons) {
  2353. struct sw_tx_bd *tx_buf;
  2354. struct sk_buff *skb;
  2355. int i, last;
  2356. sw_ring_cons = TX_RING_IDX(sw_cons);
  2357. tx_buf = &txr->tx_buf_ring[sw_ring_cons];
  2358. skb = tx_buf->skb;
  2359. /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
  2360. prefetch(&skb->end);
  2361. /* partial BD completions possible with TSO packets */
  2362. if (tx_buf->is_gso) {
  2363. u16 last_idx, last_ring_idx;
  2364. last_idx = sw_cons + tx_buf->nr_frags + 1;
  2365. last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
  2366. if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
  2367. last_idx++;
  2368. }
  2369. if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
  2370. break;
  2371. }
  2372. }
  2373. dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
  2374. skb_headlen(skb), PCI_DMA_TODEVICE);
  2375. tx_buf->skb = NULL;
  2376. last = tx_buf->nr_frags;
  2377. for (i = 0; i < last; i++) {
  2378. sw_cons = NEXT_TX_BD(sw_cons);
  2379. dma_unmap_page(&bp->pdev->dev,
  2380. dma_unmap_addr(
  2381. &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
  2382. mapping),
  2383. skb_shinfo(skb)->frags[i].size,
  2384. PCI_DMA_TODEVICE);
  2385. }
  2386. sw_cons = NEXT_TX_BD(sw_cons);
  2387. dev_kfree_skb(skb);
  2388. tx_pkt++;
  2389. if (tx_pkt == budget)
  2390. break;
  2391. if (hw_cons == sw_cons)
  2392. hw_cons = bnx2_get_hw_tx_cons(bnapi);
  2393. }
  2394. txr->hw_tx_cons = hw_cons;
  2395. txr->tx_cons = sw_cons;
  2396. /* Need to make the tx_cons update visible to bnx2_start_xmit()
  2397. * before checking for netif_tx_queue_stopped(). Without the
  2398. * memory barrier, there is a small possibility that bnx2_start_xmit()
  2399. * will miss it and cause the queue to be stopped forever.
  2400. */
  2401. smp_mb();
  2402. if (unlikely(netif_tx_queue_stopped(txq)) &&
  2403. (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
  2404. __netif_tx_lock(txq, smp_processor_id());
  2405. if ((netif_tx_queue_stopped(txq)) &&
  2406. (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
  2407. netif_tx_wake_queue(txq);
  2408. __netif_tx_unlock(txq);
  2409. }
  2410. return tx_pkt;
  2411. }
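/* Recycle 'count' rx pages back to the producer side without
 * allocating new ones, moving each page and its DMA mapping from the
 * consumer slot to the corresponding producer slot.  If an skb is
 * passed, its last page fragment is recycled as well and the skb is
 * freed.
 */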
  2412. static void
  2413. bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
  2414. struct sk_buff *skb, int count)
  2415. {
  2416. struct sw_pg *cons_rx_pg, *prod_rx_pg;
  2417. struct rx_bd *cons_bd, *prod_bd;
  2418. int i;
  2419. u16 hw_prod, prod;
  2420. u16 cons = rxr->rx_pg_cons;
  2421. cons_rx_pg = &rxr->rx_pg_ring[cons];
  2422. /* The caller was unable to allocate a new page to replace the
  2423. * last one in the frags array, so we need to recycle that page
  2424. * and then free the skb.
  2425. */
  2426. if (skb) {
  2427. struct page *page;
  2428. struct skb_shared_info *shinfo;
  2429. shinfo = skb_shinfo(skb);
  2430. shinfo->nr_frags--;
  2431. page = shinfo->frags[shinfo->nr_frags].page;
  2432. shinfo->frags[shinfo->nr_frags].page = NULL;
  2433. cons_rx_pg->page = page;
  2434. dev_kfree_skb(skb);
  2435. }
  2436. hw_prod = rxr->rx_pg_prod;
  2437. for (i = 0; i < count; i++) {
  2438. prod = RX_PG_RING_IDX(hw_prod);
  2439. prod_rx_pg = &rxr->rx_pg_ring[prod];
  2440. cons_rx_pg = &rxr->rx_pg_ring[cons];
  2441. cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
  2442. prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
  2443. if (prod != cons) {
  2444. prod_rx_pg->page = cons_rx_pg->page;
  2445. cons_rx_pg->page = NULL;
  2446. dma_unmap_addr_set(prod_rx_pg, mapping,
  2447. dma_unmap_addr(cons_rx_pg, mapping));
  2448. prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
  2449. prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
  2450. }
  2451. cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
  2452. hw_prod = NEXT_RX_BD(hw_prod);
  2453. }
  2454. rxr->rx_pg_prod = hw_prod;
  2455. rxr->rx_pg_cons = cons;
  2456. }
  2457. static inline void
  2458. bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
  2459. struct sk_buff *skb, u16 cons, u16 prod)
  2460. {
  2461. struct sw_bd *cons_rx_buf, *prod_rx_buf;
  2462. struct rx_bd *cons_bd, *prod_bd;
  2463. cons_rx_buf = &rxr->rx_buf_ring[cons];
  2464. prod_rx_buf = &rxr->rx_buf_ring[prod];
  2465. dma_sync_single_for_device(&bp->pdev->dev,
  2466. dma_unmap_addr(cons_rx_buf, mapping),
  2467. BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
  2468. rxr->rx_prod_bseq += bp->rx_buf_use_size;
  2469. prod_rx_buf->skb = skb;
  2470. prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
  2471. if (cons == prod)
  2472. return;
  2473. dma_unmap_addr_set(prod_rx_buf, mapping,
  2474. dma_unmap_addr(cons_rx_buf, mapping));
  2475. cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
  2476. prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
  2477. prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
  2478. prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
  2479. }
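/* Finish receiving one packet into 'skb'.  A replacement buffer is
 * posted first; if that fails the old buffer (and any pages it would
 * have used) is recycled and an error returned.  For split or jumbo
 * frames the remainder of the packet is attached as page fragments,
 * with each consumed page replaced as it is taken, and the trailing
 * 4-byte CRC is trimmed from the last fragment.
 */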
  2480. static int
  2481. bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
  2482. unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
  2483. u32 ring_idx)
  2484. {
  2485. int err;
  2486. u16 prod = ring_idx & 0xffff;
  2487. err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
  2488. if (unlikely(err)) {
  2489. bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
  2490. if (hdr_len) {
  2491. unsigned int raw_len = len + 4;
  2492. int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
  2493. bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
  2494. }
  2495. return err;
  2496. }
  2497. skb_reserve(skb, BNX2_RX_OFFSET);
  2498. dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
  2499. PCI_DMA_FROMDEVICE);
  2500. if (hdr_len == 0) {
  2501. skb_put(skb, len);
  2502. return 0;
  2503. } else {
  2504. unsigned int i, frag_len, frag_size, pages;
  2505. struct sw_pg *rx_pg;
  2506. u16 pg_cons = rxr->rx_pg_cons;
  2507. u16 pg_prod = rxr->rx_pg_prod;
  2508. frag_size = len + 4 - hdr_len;
  2509. pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
  2510. skb_put(skb, hdr_len);
  2511. for (i = 0; i < pages; i++) {
  2512. dma_addr_t mapping_old;
  2513. frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
  2514. if (unlikely(frag_len <= 4)) {
  2515. unsigned int tail = 4 - frag_len;
  2516. rxr->rx_pg_cons = pg_cons;
  2517. rxr->rx_pg_prod = pg_prod;
  2518. bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
  2519. pages - i);
  2520. skb->len -= tail;
  2521. if (i == 0) {
  2522. skb->tail -= tail;
  2523. } else {
  2524. skb_frag_t *frag =
  2525. &skb_shinfo(skb)->frags[i - 1];
  2526. frag->size -= tail;
  2527. skb->data_len -= tail;
  2528. skb->truesize -= tail;
  2529. }
  2530. return 0;
  2531. }
  2532. rx_pg = &rxr->rx_pg_ring[pg_cons];
  2533. /* Don't unmap yet. If we're unable to allocate a new
  2534. * page, we need to recycle the page and the DMA addr.
  2535. */
  2536. mapping_old = dma_unmap_addr(rx_pg, mapping);
  2537. if (i == pages - 1)
  2538. frag_len -= 4;
  2539. skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
  2540. rx_pg->page = NULL;
  2541. err = bnx2_alloc_rx_page(bp, rxr,
  2542. RX_PG_RING_IDX(pg_prod),
  2543. GFP_ATOMIC);
  2544. if (unlikely(err)) {
  2545. rxr->rx_pg_cons = pg_cons;
  2546. rxr->rx_pg_prod = pg_prod;
  2547. bnx2_reuse_rx_skb_pages(bp, rxr, skb,
  2548. pages - i);
  2549. return err;
  2550. }
  2551. dma_unmap_page(&bp->pdev->dev, mapping_old,
  2552. PAGE_SIZE, PCI_DMA_FROMDEVICE);
  2553. frag_size -= frag_len;
  2554. skb->data_len += frag_len;
  2555. skb->truesize += frag_len;
  2556. skb->len += frag_len;
  2557. pg_prod = NEXT_RX_BD(pg_prod);
  2558. pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
  2559. }
  2560. rxr->rx_pg_prod = pg_prod;
  2561. rxr->rx_pg_cons = pg_cons;
  2562. }
  2563. return 0;
  2564. }
  2565. static inline u16
  2566. bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
  2567. {
  2568. u16 cons;
  2569. /* Tell compiler that status block fields can change. */
  2570. barrier();
  2571. cons = *bnapi->hw_rx_cons_ptr;
  2572. barrier();
  2573. if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
  2574. cons++;
  2575. return cons;
  2576. }
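/* Main rx loop: walk the ring from the software to the hardware
 * consumer index, drop frames the chip flagged as bad, copy small
 * frames into fresh skbs, build fragmented skbs for large ones, handle
 * VLAN tags and checksum/RSS-hash offload, then write back the rx (and
 * page ring, if used) producer indices and the byte sequence register.
 */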
  2577. static int
  2578. bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
  2579. {
  2580. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  2581. u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
  2582. struct l2_fhdr *rx_hdr;
  2583. int rx_pkt = 0, pg_ring_used = 0;
  2584. hw_cons = bnx2_get_hw_rx_cons(bnapi);
  2585. sw_cons = rxr->rx_cons;
  2586. sw_prod = rxr->rx_prod;
  2587. /* Memory barrier necessary as speculative reads of the rx
  2588. * buffer can be ahead of the index in the status block
  2589. */
  2590. rmb();
  2591. while (sw_cons != hw_cons) {
  2592. unsigned int len, hdr_len;
  2593. u32 status;
  2594. struct sw_bd *rx_buf, *next_rx_buf;
  2595. struct sk_buff *skb;
  2596. dma_addr_t dma_addr;
  2597. u16 vtag = 0;
  2598. int hw_vlan __maybe_unused = 0;
  2599. sw_ring_cons = RX_RING_IDX(sw_cons);
  2600. sw_ring_prod = RX_RING_IDX(sw_prod);
  2601. rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
  2602. skb = rx_buf->skb;
  2603. prefetchw(skb);
  2604. next_rx_buf =
  2605. &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
  2606. prefetch(next_rx_buf->desc);
  2607. rx_buf->skb = NULL;
  2608. dma_addr = dma_unmap_addr(rx_buf, mapping);
  2609. dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
  2610. BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
  2611. PCI_DMA_FROMDEVICE);
  2612. rx_hdr = rx_buf->desc;
  2613. len = rx_hdr->l2_fhdr_pkt_len;
  2614. status = rx_hdr->l2_fhdr_status;
  2615. hdr_len = 0;
  2616. if (status & L2_FHDR_STATUS_SPLIT) {
  2617. hdr_len = rx_hdr->l2_fhdr_ip_xsum;
  2618. pg_ring_used = 1;
  2619. } else if (len > bp->rx_jumbo_thresh) {
  2620. hdr_len = bp->rx_jumbo_thresh;
  2621. pg_ring_used = 1;
  2622. }
  2623. if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
  2624. L2_FHDR_ERRORS_PHY_DECODE |
  2625. L2_FHDR_ERRORS_ALIGNMENT |
  2626. L2_FHDR_ERRORS_TOO_SHORT |
  2627. L2_FHDR_ERRORS_GIANT_FRAME))) {
  2628. bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
  2629. sw_ring_prod);
  2630. if (pg_ring_used) {
  2631. int pages;
  2632. pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
  2633. bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
  2634. }
  2635. goto next_rx;
  2636. }
  2637. len -= 4;
  2638. if (len <= bp->rx_copy_thresh) {
  2639. struct sk_buff *new_skb;
  2640. new_skb = netdev_alloc_skb(bp->dev, len + 6);
  2641. if (new_skb == NULL) {
  2642. bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
  2643. sw_ring_prod);
  2644. goto next_rx;
  2645. }
  2646. /* aligned copy */
  2647. skb_copy_from_linear_data_offset(skb,
  2648. BNX2_RX_OFFSET - 6,
  2649. new_skb->data, len + 6);
  2650. skb_reserve(new_skb, 6);
  2651. skb_put(new_skb, len);
  2652. bnx2_reuse_rx_skb(bp, rxr, skb,
  2653. sw_ring_cons, sw_ring_prod);
  2654. skb = new_skb;
  2655. } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
  2656. dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
  2657. goto next_rx;
  2658. if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
  2659. !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
  2660. vtag = rx_hdr->l2_fhdr_vlan_tag;
  2661. #ifdef BCM_VLAN
  2662. if (bp->vlgrp)
  2663. hw_vlan = 1;
  2664. else
  2665. #endif
  2666. {
  2667. struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
  2668. __skb_push(skb, 4);
  2669. memmove(ve, skb->data + 4, ETH_ALEN * 2);
  2670. ve->h_vlan_proto = htons(ETH_P_8021Q);
  2671. ve->h_vlan_TCI = htons(vtag);
  2672. len += 4;
  2673. }
  2674. }
  2675. skb->protocol = eth_type_trans(skb, bp->dev);
  2676. if ((len > (bp->dev->mtu + ETH_HLEN)) &&
  2677. (ntohs(skb->protocol) != 0x8100)) {
  2678. dev_kfree_skb(skb);
  2679. goto next_rx;
  2680. }
  2681. skb->ip_summed = CHECKSUM_NONE;
  2682. if (bp->rx_csum &&
  2683. (status & (L2_FHDR_STATUS_TCP_SEGMENT |
  2684. L2_FHDR_STATUS_UDP_DATAGRAM))) {
  2685. if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
  2686. L2_FHDR_ERRORS_UDP_XSUM)) == 0))
  2687. skb->ip_summed = CHECKSUM_UNNECESSARY;
  2688. }
  2689. if ((bp->dev->features & NETIF_F_RXHASH) &&
  2690. ((status & L2_FHDR_STATUS_USE_RXHASH) ==
  2691. L2_FHDR_STATUS_USE_RXHASH))
  2692. skb->rxhash = rx_hdr->l2_fhdr_hash;
  2693. skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
  2694. #ifdef BCM_VLAN
  2695. if (hw_vlan)
  2696. vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
  2697. else
  2698. #endif
  2699. napi_gro_receive(&bnapi->napi, skb);
  2700. rx_pkt++;
  2701. next_rx:
  2702. sw_cons = NEXT_RX_BD(sw_cons);
  2703. sw_prod = NEXT_RX_BD(sw_prod);
  2704. if (rx_pkt == budget)
  2705. break;
  2706. /* Refresh hw_cons to see if there is new work */
  2707. if (sw_cons == hw_cons) {
  2708. hw_cons = bnx2_get_hw_rx_cons(bnapi);
  2709. rmb();
  2710. }
  2711. }
  2712. rxr->rx_cons = sw_cons;
  2713. rxr->rx_prod = sw_prod;
  2714. if (pg_ring_used)
  2715. REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
  2716. REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
  2717. REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
  2718. mmiowb();
  2719. return rx_pkt;
  2720. }
  2721. /* MSI ISR - The only difference between this and the INTx ISR
  2722. * is that the MSI interrupt is always serviced.
  2723. */
  2724. static irqreturn_t
  2725. bnx2_msi(int irq, void *dev_instance)
  2726. {
  2727. struct bnx2_napi *bnapi = dev_instance;
  2728. struct bnx2 *bp = bnapi->bp;
  2729. prefetch(bnapi->status_blk.msi);
  2730. REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
  2731. BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
  2732. BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
  2733. /* Return here if interrupt is disabled. */
  2734. if (unlikely(atomic_read(&bp->intr_sem) != 0))
  2735. return IRQ_HANDLED;
  2736. napi_schedule(&bnapi->napi);
  2737. return IRQ_HANDLED;
  2738. }
  2739. static irqreturn_t
  2740. bnx2_msi_1shot(int irq, void *dev_instance)
  2741. {
  2742. struct bnx2_napi *bnapi = dev_instance;
  2743. struct bnx2 *bp = bnapi->bp;
  2744. prefetch(bnapi->status_blk.msi);
  2745. /* Return here if interrupt is disabled. */
  2746. if (unlikely(atomic_read(&bp->intr_sem) != 0))
  2747. return IRQ_HANDLED;
  2748. napi_schedule(&bnapi->napi);
  2749. return IRQ_HANDLED;
  2750. }
  2751. static irqreturn_t
  2752. bnx2_interrupt(int irq, void *dev_instance)
  2753. {
  2754. struct bnx2_napi *bnapi = dev_instance;
  2755. struct bnx2 *bp = bnapi->bp;
  2756. struct status_block *sblk = bnapi->status_blk.msi;
  2757. /* When using INTx, it is possible for the interrupt to arrive
  2758. * at the CPU before the status block write posted prior to the
  2759. * interrupt has landed. Reading a register will flush the status block.
  2760. * When using MSI, the MSI message will always complete after
  2761. * the status block write.
  2762. */
  2763. if ((sblk->status_idx == bnapi->last_status_idx) &&
  2764. (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
  2765. BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
  2766. return IRQ_NONE;
  2767. REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
  2768. BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
  2769. BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
  2770. /* Read back to deassert IRQ immediately to avoid too many
  2771. * spurious interrupts.
  2772. */
  2773. REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
  2774. /* Return here if interrupt is shared and is disabled. */
  2775. if (unlikely(atomic_read(&bp->intr_sem) != 0))
  2776. return IRQ_HANDLED;
  2777. if (napi_schedule_prep(&bnapi->napi)) {
  2778. bnapi->last_status_idx = sblk->status_idx;
  2779. __napi_schedule(&bnapi->napi);
  2780. }
  2781. return IRQ_HANDLED;
  2782. }
  2783. static inline int
  2784. bnx2_has_fast_work(struct bnx2_napi *bnapi)
  2785. {
  2786. struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
  2787. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  2788. if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
  2789. (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
  2790. return 1;
  2791. return 0;
  2792. }
  2793. #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
  2794. STATUS_ATTN_BITS_TIMER_ABORT)
  2795. static inline int
  2796. bnx2_has_work(struct bnx2_napi *bnapi)
  2797. {
  2798. struct status_block *sblk = bnapi->status_blk.msi;
  2799. if (bnx2_has_fast_work(bnapi))
  2800. return 1;
  2801. #ifdef BCM_CNIC
  2802. if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
  2803. return 1;
  2804. #endif
  2805. if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
  2806. (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
  2807. return 1;
  2808. return 0;
  2809. }
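/* Periodic check for an MSI that appears to have been missed: if work
 * is pending but the status index has not advanced since the last
 * check, briefly toggle the MSI enable bit and invoke the MSI handler
 * directly to reschedule NAPI.
 */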
  2810. static void
  2811. bnx2_chk_missed_msi(struct bnx2 *bp)
  2812. {
  2813. struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
  2814. u32 msi_ctrl;
  2815. if (bnx2_has_work(bnapi)) {
  2816. msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
  2817. if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
  2818. return;
  2819. if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
  2820. REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
  2821. ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
  2822. REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
  2823. bnx2_msi(bp->irq_tbl[0].vector, bnapi);
  2824. }
  2825. }
  2826. bp->idle_chk_status_idx = bnapi->last_status_idx;
  2827. }
  2828. #ifdef BCM_CNIC
  2829. static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
  2830. {
  2831. struct cnic_ops *c_ops;
  2832. if (!bnapi->cnic_present)
  2833. return;
  2834. rcu_read_lock();
  2835. c_ops = rcu_dereference(bp->cnic_ops);
  2836. if (c_ops)
  2837. bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
  2838. bnapi->status_blk.msi);
  2839. rcu_read_unlock();
  2840. }
  2841. #endif
  2842. static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
  2843. {
  2844. struct status_block *sblk = bnapi->status_blk.msi;
  2845. u32 status_attn_bits = sblk->status_attn_bits;
  2846. u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
  2847. if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
  2848. (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
  2849. bnx2_phy_int(bp, bnapi);
  2850. /* This is needed to take care of transient status
  2851. * during link changes.
  2852. */
  2853. REG_WR(bp, BNX2_HC_COMMAND,
  2854. bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
  2855. REG_RD(bp, BNX2_HC_COMMAND);
  2856. }
  2857. }
  2858. static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
  2859. int work_done, int budget)
  2860. {
  2861. struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
  2862. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  2863. if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
  2864. bnx2_tx_int(bp, bnapi, 0);
  2865. if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
  2866. work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
  2867. return work_done;
  2868. }
  2869. static int bnx2_poll_msix(struct napi_struct *napi, int budget)
  2870. {
  2871. struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
  2872. struct bnx2 *bp = bnapi->bp;
  2873. int work_done = 0;
  2874. struct status_block_msix *sblk = bnapi->status_blk.msix;
  2875. while (1) {
  2876. work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
  2877. if (unlikely(work_done >= budget))
  2878. break;
  2879. bnapi->last_status_idx = sblk->status_idx;
  2880. /* status idx must be read before checking for more work. */
  2881. rmb();
  2882. if (likely(!bnx2_has_fast_work(bnapi))) {
  2883. napi_complete(napi);
  2884. REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
  2885. BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
  2886. bnapi->last_status_idx);
  2887. break;
  2888. }
  2889. }
  2890. return work_done;
  2891. }
  2892. static int bnx2_poll(struct napi_struct *napi, int budget)
  2893. {
  2894. struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
  2895. struct bnx2 *bp = bnapi->bp;
  2896. int work_done = 0;
  2897. struct status_block *sblk = bnapi->status_blk.msi;
  2898. while (1) {
  2899. bnx2_poll_link(bp, bnapi);
  2900. work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
  2901. #ifdef BCM_CNIC
  2902. bnx2_poll_cnic(bp, bnapi);
  2903. #endif
  2904. /* bnapi->last_status_idx is used below to tell the hw how
  2905. * much work has been processed, so we must read it before
  2906. * checking for more work.
  2907. */
  2908. bnapi->last_status_idx = sblk->status_idx;
  2909. if (unlikely(work_done >= budget))
  2910. break;
  2911. rmb();
  2912. if (likely(!bnx2_has_work(bnapi))) {
  2913. napi_complete(napi);
  2914. if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
  2915. REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
  2916. BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
  2917. bnapi->last_status_idx);
  2918. break;
  2919. }
  2920. REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
  2921. BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
  2922. BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
  2923. bnapi->last_status_idx);
  2924. REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
  2925. BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
  2926. bnapi->last_status_idx);
  2927. break;
  2928. }
  2929. }
  2930. return work_done;
  2931. }
  2932. /* Called with rtnl_lock from vlan functions and also netif_tx_lock
  2933. * from set_multicast.
  2934. */
  2935. static void
  2936. bnx2_set_rx_mode(struct net_device *dev)
  2937. {
  2938. struct bnx2 *bp = netdev_priv(dev);
  2939. u32 rx_mode, sort_mode;
  2940. struct netdev_hw_addr *ha;
  2941. int i;
  2942. if (!netif_running(dev))
  2943. return;
  2944. spin_lock_bh(&bp->phy_lock);
  2945. rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
  2946. BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
  2947. sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
  2948. #ifdef BCM_VLAN
  2949. if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
  2950. rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
  2951. #else
  2952. if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
  2953. rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
  2954. #endif
  2955. if (dev->flags & IFF_PROMISC) {
  2956. /* Promiscuous mode. */
  2957. rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
  2958. sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
  2959. BNX2_RPM_SORT_USER0_PROM_VLAN;
  2960. }
  2961. else if (dev->flags & IFF_ALLMULTI) {
  2962. for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
  2963. REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
  2964. 0xffffffff);
  2965. }
  2966. sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
  2967. }
  2968. else {
  2969. /* Accept one or more multicast(s). */
  2970. u32 mc_filter[NUM_MC_HASH_REGISTERS];
  2971. u32 regidx;
  2972. u32 bit;
  2973. u32 crc;
  2974. memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
  2975. netdev_for_each_mc_addr(ha, dev) {
  2976. crc = ether_crc_le(ETH_ALEN, ha->addr);
  2977. bit = crc & 0xff;
  2978. regidx = (bit & 0xe0) >> 5;
  2979. bit &= 0x1f;
  2980. mc_filter[regidx] |= (1 << bit);
  2981. }
  2982. for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
  2983. REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
  2984. mc_filter[i]);
  2985. }
  2986. sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
  2987. }
  2988. if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
  2989. rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
  2990. sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
  2991. BNX2_RPM_SORT_USER0_PROM_VLAN;
  2992. } else if (!(dev->flags & IFF_PROMISC)) {
  2993. /* Add all entries into the match filter list */
  2994. i = 0;
  2995. netdev_for_each_uc_addr(ha, dev) {
  2996. bnx2_set_mac_addr(bp, ha->addr,
  2997. i + BNX2_START_UNICAST_ADDRESS_INDEX);
  2998. sort_mode |= (1 <<
  2999. (i + BNX2_START_UNICAST_ADDRESS_INDEX));
  3000. i++;
  3001. }
  3002. }
  3003. if (rx_mode != bp->rx_mode) {
  3004. bp->rx_mode = rx_mode;
  3005. REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
  3006. }
  3007. REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
  3008. REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
  3009. REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
  3010. spin_unlock_bh(&bp->phy_lock);
  3011. }
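/* Validate one section header of a firmware file: offset and length
 * must lie within the file, the offset must be dword aligned, the
 * length must be a multiple of 'alignment', and a required section
 * must be non-empty.
 */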
  3012. static int __devinit
  3013. check_fw_section(const struct firmware *fw,
  3014. const struct bnx2_fw_file_section *section,
  3015. u32 alignment, bool non_empty)
  3016. {
  3017. u32 offset = be32_to_cpu(section->offset);
  3018. u32 len = be32_to_cpu(section->len);
  3019. if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
  3020. return -EINVAL;
  3021. if ((non_empty && len == 0) || len > fw->size - offset ||
  3022. len & (alignment - 1))
  3023. return -EINVAL;
  3024. return 0;
  3025. }
  3026. static int __devinit
  3027. check_mips_fw_entry(const struct firmware *fw,
  3028. const struct bnx2_mips_fw_file_entry *entry)
  3029. {
  3030. if (check_fw_section(fw, &entry->text, 4, true) ||
  3031. check_fw_section(fw, &entry->data, 4, false) ||
  3032. check_fw_section(fw, &entry->rodata, 4, false))
  3033. return -EINVAL;
  3034. return 0;
  3035. }
  3036. static int __devinit
  3037. bnx2_request_firmware(struct bnx2 *bp)
  3038. {
  3039. const char *mips_fw_file, *rv2p_fw_file;
  3040. const struct bnx2_mips_fw_file *mips_fw;
  3041. const struct bnx2_rv2p_fw_file *rv2p_fw;
  3042. int rc;
  3043. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  3044. mips_fw_file = FW_MIPS_FILE_09;
  3045. if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
  3046. (CHIP_ID(bp) == CHIP_ID_5709_A1))
  3047. rv2p_fw_file = FW_RV2P_FILE_09_Ax;
  3048. else
  3049. rv2p_fw_file = FW_RV2P_FILE_09;
  3050. } else {
  3051. mips_fw_file = FW_MIPS_FILE_06;
  3052. rv2p_fw_file = FW_RV2P_FILE_06;
  3053. }
  3054. rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
  3055. if (rc) {
  3056. pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
  3057. return rc;
  3058. }
  3059. rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
  3060. if (rc) {
  3061. pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
  3062. return rc;
  3063. }
  3064. mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
  3065. rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
  3066. if (bp->mips_firmware->size < sizeof(*mips_fw) ||
  3067. check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
  3068. check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
  3069. check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
  3070. check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
  3071. check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
  3072. pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
  3073. return -EINVAL;
  3074. }
  3075. if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
  3076. check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
  3077. check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
  3078. pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
  3079. return -EINVAL;
  3080. }
  3081. return 0;
  3082. }
  3083. static u32
  3084. rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
  3085. {
  3086. switch (idx) {
  3087. case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
  3088. rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
  3089. rv2p_code |= RV2P_BD_PAGE_SIZE;
  3090. break;
  3091. }
  3092. return rv2p_code;
  3093. }
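/* Download one RV2P processor image: feed the code two 32-bit words at
 * a time through the INSTR_HIGH/INSTR_LOW window, apply the per-image
 * fixup table (which patches things such as the BD page size), then
 * put the processor in reset; it is un-stalled later.
 */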
  3094. static int
  3095. load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
  3096. const struct bnx2_rv2p_fw_file_entry *fw_entry)
  3097. {
  3098. u32 rv2p_code_len, file_offset;
  3099. __be32 *rv2p_code;
  3100. int i;
  3101. u32 val, cmd, addr;
  3102. rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
  3103. file_offset = be32_to_cpu(fw_entry->rv2p.offset);
  3104. rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
  3105. if (rv2p_proc == RV2P_PROC1) {
  3106. cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
  3107. addr = BNX2_RV2P_PROC1_ADDR_CMD;
  3108. } else {
  3109. cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
  3110. addr = BNX2_RV2P_PROC2_ADDR_CMD;
  3111. }
  3112. for (i = 0; i < rv2p_code_len; i += 8) {
  3113. REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
  3114. rv2p_code++;
  3115. REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
  3116. rv2p_code++;
  3117. val = (i / 8) | cmd;
  3118. REG_WR(bp, addr, val);
  3119. }
  3120. rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
  3121. for (i = 0; i < 8; i++) {
  3122. u32 loc, code;
  3123. loc = be32_to_cpu(fw_entry->fixup[i]);
  3124. if (loc && ((loc * 4) < rv2p_code_len)) {
  3125. code = be32_to_cpu(*(rv2p_code + loc - 1));
  3126. REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
  3127. code = be32_to_cpu(*(rv2p_code + loc));
  3128. code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
  3129. REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
  3130. val = (loc / 2) | cmd;
  3131. REG_WR(bp, addr, val);
  3132. }
  3133. }
  3134. /* Reset the processor, un-stall is done later. */
  3135. if (rv2p_proc == RV2P_PROC1) {
  3136. REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
  3137. }
  3138. else {
  3139. REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
  3140. }
  3141. return 0;
  3142. }
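/* Download a MIPS CPU image: halt the CPU, copy the text, data and
 * read-only sections into its scratchpad address window, clear the
 * prefetched instruction, set the program counter to the entry point
 * and clear the halt bit to start execution.
 */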
  3143. static int
  3144. load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
  3145. const struct bnx2_mips_fw_file_entry *fw_entry)
  3146. {
  3147. u32 addr, len, file_offset;
  3148. __be32 *data;
  3149. u32 offset;
  3150. u32 val;
  3151. /* Halt the CPU. */
  3152. val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
  3153. val |= cpu_reg->mode_value_halt;
  3154. bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
  3155. bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
  3156. /* Load the Text area. */
  3157. addr = be32_to_cpu(fw_entry->text.addr);
  3158. len = be32_to_cpu(fw_entry->text.len);
  3159. file_offset = be32_to_cpu(fw_entry->text.offset);
  3160. data = (__be32 *)(bp->mips_firmware->data + file_offset);
  3161. offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
  3162. if (len) {
  3163. int j;
  3164. for (j = 0; j < (len / 4); j++, offset += 4)
  3165. bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
  3166. }
  3167. /* Load the Data area. */
  3168. addr = be32_to_cpu(fw_entry->data.addr);
  3169. len = be32_to_cpu(fw_entry->data.len);
  3170. file_offset = be32_to_cpu(fw_entry->data.offset);
  3171. data = (__be32 *)(bp->mips_firmware->data + file_offset);
  3172. offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
  3173. if (len) {
  3174. int j;
  3175. for (j = 0; j < (len / 4); j++, offset += 4)
  3176. bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
  3177. }
  3178. /* Load the Read-Only area. */
  3179. addr = be32_to_cpu(fw_entry->rodata.addr);
  3180. len = be32_to_cpu(fw_entry->rodata.len);
  3181. file_offset = be32_to_cpu(fw_entry->rodata.offset);
  3182. data = (__be32 *)(bp->mips_firmware->data + file_offset);
  3183. offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
  3184. if (len) {
  3185. int j;
  3186. for (j = 0; j < (len / 4); j++, offset += 4)
  3187. bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
  3188. }
  3189. /* Clear the pre-fetch instruction. */
  3190. bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
  3191. val = be32_to_cpu(fw_entry->start_addr);
  3192. bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
  3193. /* Start the CPU. */
  3194. val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
  3195. val &= ~cpu_reg->mode_value_halt;
  3196. bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
  3197. bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
  3198. return 0;
  3199. }
  3200. static int
  3201. bnx2_init_cpus(struct bnx2 *bp)
  3202. {
  3203. const struct bnx2_mips_fw_file *mips_fw =
  3204. (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
  3205. const struct bnx2_rv2p_fw_file *rv2p_fw =
  3206. (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
  3207. int rc;
  3208. /* Initialize the RV2P processor. */
  3209. load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
  3210. load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
  3211. /* Initialize the RX Processor. */
  3212. rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
  3213. if (rc)
  3214. goto init_cpu_err;
  3215. /* Initialize the TX Processor. */
  3216. rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
  3217. if (rc)
  3218. goto init_cpu_err;
  3219. /* Initialize the TX Patch-up Processor. */
  3220. rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
  3221. if (rc)
  3222. goto init_cpu_err;
  3223. /* Initialize the Completion Processor. */
  3224. rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
  3225. if (rc)
  3226. goto init_cpu_err;
  3227. /* Initialize the Command Processor. */
  3228. rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
  3229. init_cpu_err:
  3230. return rc;
  3231. }
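/* Switch the device between D0 and D3hot.  When entering D3hot with
 * Wake-on-LAN enabled, copper ports are first renegotiated to 10/100
 * (presumably to keep the link up at lower power), the MAC is set up
 * to receive magic and ACPI wake packets, and the firmware is told
 * whether WOL context must be preserved before PMCSR is written.
 */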
  3232. static int
  3233. bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
  3234. {
  3235. u16 pmcsr;
  3236. pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
  3237. switch (state) {
  3238. case PCI_D0: {
  3239. u32 val;
  3240. pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
  3241. (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
  3242. PCI_PM_CTRL_PME_STATUS);
  3243. if (pmcsr & PCI_PM_CTRL_STATE_MASK)
  3244. /* delay required during transition out of D3hot */
  3245. msleep(20);
  3246. val = REG_RD(bp, BNX2_EMAC_MODE);
  3247. val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
  3248. val &= ~BNX2_EMAC_MODE_MPKT;
  3249. REG_WR(bp, BNX2_EMAC_MODE, val);
  3250. val = REG_RD(bp, BNX2_RPM_CONFIG);
  3251. val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
  3252. REG_WR(bp, BNX2_RPM_CONFIG, val);
  3253. break;
  3254. }
  3255. case PCI_D3hot: {
  3256. int i;
  3257. u32 val, wol_msg;
  3258. if (bp->wol) {
  3259. u32 advertising;
  3260. u8 autoneg;
  3261. autoneg = bp->autoneg;
  3262. advertising = bp->advertising;
  3263. if (bp->phy_port == PORT_TP) {
  3264. bp->autoneg = AUTONEG_SPEED;
  3265. bp->advertising = ADVERTISED_10baseT_Half |
  3266. ADVERTISED_10baseT_Full |
  3267. ADVERTISED_100baseT_Half |
  3268. ADVERTISED_100baseT_Full |
  3269. ADVERTISED_Autoneg;
  3270. }
  3271. spin_lock_bh(&bp->phy_lock);
  3272. bnx2_setup_phy(bp, bp->phy_port);
  3273. spin_unlock_bh(&bp->phy_lock);
  3274. bp->autoneg = autoneg;
  3275. bp->advertising = advertising;
  3276. bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
  3277. val = REG_RD(bp, BNX2_EMAC_MODE);
  3278. /* Enable port mode. */
  3279. val &= ~BNX2_EMAC_MODE_PORT;
  3280. val |= BNX2_EMAC_MODE_MPKT_RCVD |
  3281. BNX2_EMAC_MODE_ACPI_RCVD |
  3282. BNX2_EMAC_MODE_MPKT;
  3283. if (bp->phy_port == PORT_TP)
  3284. val |= BNX2_EMAC_MODE_PORT_MII;
  3285. else {
  3286. val |= BNX2_EMAC_MODE_PORT_GMII;
  3287. if (bp->line_speed == SPEED_2500)
  3288. val |= BNX2_EMAC_MODE_25G_MODE;
  3289. }
  3290. REG_WR(bp, BNX2_EMAC_MODE, val);
  3291. /* receive all multicast */
  3292. for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
  3293. REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
  3294. 0xffffffff);
  3295. }
  3296. REG_WR(bp, BNX2_EMAC_RX_MODE,
  3297. BNX2_EMAC_RX_MODE_SORT_MODE);
  3298. val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
  3299. BNX2_RPM_SORT_USER0_MC_EN;
  3300. REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
  3301. REG_WR(bp, BNX2_RPM_SORT_USER0, val);
  3302. REG_WR(bp, BNX2_RPM_SORT_USER0, val |
  3303. BNX2_RPM_SORT_USER0_ENA);
  3304. /* Need to enable EMAC and RPM for WOL. */
  3305. REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
  3306. BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
  3307. BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
  3308. BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
  3309. val = REG_RD(bp, BNX2_RPM_CONFIG);
  3310. val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
  3311. REG_WR(bp, BNX2_RPM_CONFIG, val);
  3312. wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
  3313. }
  3314. else {
  3315. wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
  3316. }
  3317. if (!(bp->flags & BNX2_FLAG_NO_WOL))
  3318. bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
  3319. 1, 0);
  3320. pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
  3321. if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
  3322. (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
  3323. if (bp->wol)
  3324. pmcsr |= 3;
  3325. }
  3326. else {
  3327. pmcsr |= 3;
  3328. }
  3329. if (bp->wol) {
  3330. pmcsr |= PCI_PM_CTRL_PME_ENABLE;
  3331. }
  3332. pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
  3333. pmcsr);
  3334. /* No more memory access after this point until
  3335. * device is brought back to D0.
  3336. */
  3337. udelay(50);
  3338. break;
  3339. }
  3340. default:
  3341. return -EINVAL;
  3342. }
  3343. return 0;
  3344. }
  3345. static int
  3346. bnx2_acquire_nvram_lock(struct bnx2 *bp)
  3347. {
  3348. u32 val;
  3349. int j;
  3350. /* Request access to the flash interface. */
  3351. REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
  3352. for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
  3353. val = REG_RD(bp, BNX2_NVM_SW_ARB);
  3354. if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
  3355. break;
  3356. udelay(5);
  3357. }
  3358. if (j >= NVRAM_TIMEOUT_COUNT)
  3359. return -EBUSY;
  3360. return 0;
  3361. }
  3362. static int
  3363. bnx2_release_nvram_lock(struct bnx2 *bp)
  3364. {
  3365. int j;
  3366. u32 val;
  3367. /* Relinquish nvram interface. */
  3368. REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
  3369. for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
  3370. val = REG_RD(bp, BNX2_NVM_SW_ARB);
  3371. if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
  3372. break;
  3373. udelay(5);
  3374. }
  3375. if (j >= NVRAM_TIMEOUT_COUNT)
  3376. return -EBUSY;
  3377. return 0;
  3378. }
  3379. static int
  3380. bnx2_enable_nvram_write(struct bnx2 *bp)
  3381. {
  3382. u32 val;
  3383. val = REG_RD(bp, BNX2_MISC_CFG);
  3384. REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
  3385. if (bp->flash_info->flags & BNX2_NV_WREN) {
  3386. int j;
  3387. REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
  3388. REG_WR(bp, BNX2_NVM_COMMAND,
  3389. BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
  3390. for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
  3391. udelay(5);
  3392. val = REG_RD(bp, BNX2_NVM_COMMAND);
  3393. if (val & BNX2_NVM_COMMAND_DONE)
  3394. break;
  3395. }
  3396. if (j >= NVRAM_TIMEOUT_COUNT)
  3397. return -EBUSY;
  3398. }
  3399. return 0;
  3400. }
  3401. static void
  3402. bnx2_disable_nvram_write(struct bnx2 *bp)
  3403. {
  3404. u32 val;
  3405. val = REG_RD(bp, BNX2_MISC_CFG);
  3406. REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
  3407. }
  3408. static void
  3409. bnx2_enable_nvram_access(struct bnx2 *bp)
  3410. {
  3411. u32 val;
  3412. val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
  3413. /* Enable both bits, even on read. */
  3414. REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
  3415. val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
  3416. }
  3417. static void
  3418. bnx2_disable_nvram_access(struct bnx2 *bp)
  3419. {
  3420. u32 val;
  3421. val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
  3422. /* Disable both bits, even after read. */
  3423. REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
  3424. val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
  3425. BNX2_NVM_ACCESS_ENABLE_WR_EN));
  3426. }
  3427. static int
  3428. bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
  3429. {
  3430. u32 cmd;
  3431. int j;
  3432. if (bp->flash_info->flags & BNX2_NV_BUFFERED)
  3433. /* Buffered flash, no erase needed */
  3434. return 0;
  3435. /* Build an erase command */
  3436. cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
  3437. BNX2_NVM_COMMAND_DOIT;
  3438. /* Need to clear DONE bit separately. */
  3439. REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
  3440. /* Address of the NVRAM page to erase. */
  3441. REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
  3442. /* Issue an erase command. */
  3443. REG_WR(bp, BNX2_NVM_COMMAND, cmd);
  3444. /* Wait for completion. */
  3445. for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
  3446. u32 val;
  3447. udelay(5);
  3448. val = REG_RD(bp, BNX2_NVM_COMMAND);
  3449. if (val & BNX2_NVM_COMMAND_DONE)
  3450. break;
  3451. }
  3452. if (j >= NVRAM_TIMEOUT_COUNT)
  3453. return -EBUSY;
  3454. return 0;
  3455. }
  3456. static int
  3457. bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
  3458. {
  3459. u32 cmd;
  3460. int j;
  3461. /* Build the command word. */
  3462. cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
  3463. /* Translate the offset into the buffered flash page format; not needed for the 5709. */
  3464. if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
  3465. offset = ((offset / bp->flash_info->page_size) <<
  3466. bp->flash_info->page_bits) +
  3467. (offset % bp->flash_info->page_size);
  3468. }
  3469. /* Need to clear DONE bit separately. */
  3470. REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
  3471. /* Address of the NVRAM to read from. */
  3472. REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
  3473. /* Issue a read command. */
  3474. REG_WR(bp, BNX2_NVM_COMMAND, cmd);
  3475. /* Wait for completion. */
  3476. for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
  3477. u32 val;
  3478. udelay(5);
  3479. val = REG_RD(bp, BNX2_NVM_COMMAND);
  3480. if (val & BNX2_NVM_COMMAND_DONE) {
  3481. __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
  3482. memcpy(ret_val, &v, 4);
  3483. break;
  3484. }
  3485. }
  3486. if (j >= NVRAM_TIMEOUT_COUNT)
  3487. return -EBUSY;
  3488. return 0;
  3489. }
  3490. static int
  3491. bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
  3492. {
  3493. u32 cmd;
  3494. __be32 val32;
  3495. int j;
  3496. /* Build the command word. */
  3497. cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
  3498. /* Translate the offset into the buffered flash page format; not needed for the 5709. */
  3499. if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
  3500. offset = ((offset / bp->flash_info->page_size) <<
  3501. bp->flash_info->page_bits) +
  3502. (offset % bp->flash_info->page_size);
  3503. }
  3504. /* Need to clear DONE bit separately. */
  3505. REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
  3506. memcpy(&val32, val, 4);
  3507. /* Write the data. */
  3508. REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
  3509. /* Address of the NVRAM to write to. */
  3510. REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
  3511. /* Issue the write command. */
  3512. REG_WR(bp, BNX2_NVM_COMMAND, cmd);
  3513. /* Wait for completion. */
  3514. for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
  3515. udelay(5);
  3516. if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
  3517. break;
  3518. }
  3519. if (j >= NVRAM_TIMEOUT_COUNT)
  3520. return -EBUSY;
  3521. return 0;
  3522. }
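/* Identify the attached flash/EEPROM.  The 5709 always uses the
 * flash_5709 entry; other chips match the strapping read from
 * NVM_CFG1 against flash_table and, if the interface has not been
 * reconfigured yet, program its configuration registers.  A size in
 * shared memory, if present, overrides the table's total size.
 */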
  3523. static int
  3524. bnx2_init_nvram(struct bnx2 *bp)
  3525. {
  3526. u32 val;
  3527. int j, entry_count, rc = 0;
  3528. const struct flash_spec *flash;
  3529. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  3530. bp->flash_info = &flash_5709;
  3531. goto get_flash_size;
  3532. }
  3533. /* Determine the selected interface. */
  3534. val = REG_RD(bp, BNX2_NVM_CFG1);
  3535. entry_count = ARRAY_SIZE(flash_table);
  3536. if (val & 0x40000000) {
  3537. /* Flash interface has been reconfigured */
  3538. for (j = 0, flash = &flash_table[0]; j < entry_count;
  3539. j++, flash++) {
  3540. if ((val & FLASH_BACKUP_STRAP_MASK) ==
  3541. (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
  3542. bp->flash_info = flash;
  3543. break;
  3544. }
  3545. }
  3546. }
  3547. else {
  3548. u32 mask;
  3549. /* Not yet been reconfigured */
  3550. if (val & (1 << 23))
  3551. mask = FLASH_BACKUP_STRAP_MASK;
  3552. else
  3553. mask = FLASH_STRAP_MASK;
  3554. for (j = 0, flash = &flash_table[0]; j < entry_count;
  3555. j++, flash++) {
  3556. if ((val & mask) == (flash->strapping & mask)) {
  3557. bp->flash_info = flash;
  3558. /* Request access to the flash interface. */
  3559. if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
  3560. return rc;
  3561. /* Enable access to flash interface */
  3562. bnx2_enable_nvram_access(bp);
  3563. /* Reconfigure the flash interface */
  3564. REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
  3565. REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
  3566. REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
  3567. REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
  3568. /* Disable access to flash interface */
  3569. bnx2_disable_nvram_access(bp);
  3570. bnx2_release_nvram_lock(bp);
  3571. break;
  3572. }
  3573. }
  3574. } /* if (val & 0x40000000) */
  3575. if (j == entry_count) {
  3576. bp->flash_info = NULL;
  3577. pr_alert("Unknown flash/EEPROM type\n");
  3578. return -ENODEV;
  3579. }
  3580. get_flash_size:
  3581. val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
  3582. val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
  3583. if (val)
  3584. bp->flash_size = val;
  3585. else
  3586. bp->flash_size = bp->flash_info->total_size;
  3587. return rc;
  3588. }
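/* Read an arbitrary byte range from NVRAM.  The hardware only does
 * aligned 32-bit accesses, so unaligned head and tail bytes are read
 * into a scratch dword and only the requested bytes are copied out.
 */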
  3589. static int
  3590. bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
  3591. int buf_size)
  3592. {
  3593. int rc = 0;
  3594. u32 cmd_flags, offset32, len32, extra;
  3595. if (buf_size == 0)
  3596. return 0;
  3597. /* Request access to the flash interface. */
  3598. if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
  3599. return rc;
  3600. /* Enable access to flash interface */
  3601. bnx2_enable_nvram_access(bp);
  3602. len32 = buf_size;
  3603. offset32 = offset;
  3604. extra = 0;
  3605. cmd_flags = 0;
  3606. if (offset32 & 3) {
  3607. u8 buf[4];
  3608. u32 pre_len;
  3609. offset32 &= ~3;
  3610. pre_len = 4 - (offset & 3);
  3611. if (pre_len >= len32) {
  3612. pre_len = len32;
  3613. cmd_flags = BNX2_NVM_COMMAND_FIRST |
  3614. BNX2_NVM_COMMAND_LAST;
  3615. }
  3616. else {
  3617. cmd_flags = BNX2_NVM_COMMAND_FIRST;
  3618. }
  3619. rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
  3620. if (rc)
  3621. return rc;
  3622. memcpy(ret_buf, buf + (offset & 3), pre_len);
  3623. offset32 += 4;
  3624. ret_buf += pre_len;
  3625. len32 -= pre_len;
  3626. }
  3627. if (len32 & 3) {
  3628. extra = 4 - (len32 & 3);
  3629. len32 = (len32 + 4) & ~3;
  3630. }
  3631. if (len32 == 4) {
  3632. u8 buf[4];
  3633. if (cmd_flags)
  3634. cmd_flags = BNX2_NVM_COMMAND_LAST;
  3635. else
  3636. cmd_flags = BNX2_NVM_COMMAND_FIRST |
  3637. BNX2_NVM_COMMAND_LAST;
  3638. rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
  3639. memcpy(ret_buf, buf, 4 - extra);
  3640. }
  3641. else if (len32 > 0) {
  3642. u8 buf[4];
  3643. /* Read the first word. */
  3644. if (cmd_flags)
  3645. cmd_flags = 0;
  3646. else
  3647. cmd_flags = BNX2_NVM_COMMAND_FIRST;
  3648. rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
  3649. /* Advance to the next dword. */
  3650. offset32 += 4;
  3651. ret_buf += 4;
  3652. len32 -= 4;
  3653. while (len32 > 4 && rc == 0) {
  3654. rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
  3655. /* Advance to the next dword. */
  3656. offset32 += 4;
  3657. ret_buf += 4;
  3658. len32 -= 4;
  3659. }
  3660. if (rc)
  3661. return rc;
  3662. cmd_flags = BNX2_NVM_COMMAND_LAST;
  3663. rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
  3664. memcpy(ret_buf, buf, 4 - extra);
  3665. }
  3666. /* Disable access to flash interface */
  3667. bnx2_disable_nvram_access(bp);
  3668. bnx2_release_nvram_lock(bp);
  3669. return rc;
  3670. }
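/* Write an arbitrary byte range to NVRAM.  The range is widened to
 * dword alignment with a read-modify-write of the first and last
 * dwords, and for non-buffered flash each affected page is read out,
 * erased and rewritten in full.
 */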
  3671. static int
  3672. bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
  3673. int buf_size)
  3674. {
  3675. u32 written, offset32, len32;
  3676. u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
  3677. int rc = 0;
  3678. int align_start, align_end;
  3679. buf = data_buf;
  3680. offset32 = offset;
  3681. len32 = buf_size;
  3682. align_start = align_end = 0;
  3683. if ((align_start = (offset32 & 3))) {
  3684. offset32 &= ~3;
  3685. len32 += align_start;
  3686. if (len32 < 4)
  3687. len32 = 4;
  3688. if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
  3689. return rc;
  3690. }
  3691. if (len32 & 3) {
  3692. align_end = 4 - (len32 & 3);
  3693. len32 += align_end;
  3694. if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
  3695. return rc;
  3696. }
  3697. if (align_start || align_end) {
  3698. align_buf = kmalloc(len32, GFP_KERNEL);
  3699. if (align_buf == NULL)
  3700. return -ENOMEM;
  3701. if (align_start) {
  3702. memcpy(align_buf, start, 4);
  3703. }
  3704. if (align_end) {
  3705. memcpy(align_buf + len32 - 4, end, 4);
  3706. }
  3707. memcpy(align_buf + align_start, data_buf, buf_size);
  3708. buf = align_buf;
  3709. }
  3710. if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
  3711. flash_buffer = kmalloc(264, GFP_KERNEL);
  3712. if (flash_buffer == NULL) {
  3713. rc = -ENOMEM;
  3714. goto nvram_write_end;
  3715. }
  3716. }
  3717. written = 0;
  3718. while ((written < len32) && (rc == 0)) {
  3719. u32 page_start, page_end, data_start, data_end;
  3720. u32 addr, cmd_flags;
  3721. int i;
  3722. /* Find the page_start addr */
  3723. page_start = offset32 + written;
  3724. page_start -= (page_start % bp->flash_info->page_size);
  3725. /* Find the page_end addr */
  3726. page_end = page_start + bp->flash_info->page_size;
  3727. /* Find the data_start addr */
  3728. data_start = (written == 0) ? offset32 : page_start;
  3729. /* Find the data_end addr */
  3730. data_end = (page_end > offset32 + len32) ?
  3731. (offset32 + len32) : page_end;
  3732. /* Request access to the flash interface. */
  3733. if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
  3734. goto nvram_write_end;
  3735. /* Enable access to flash interface */
  3736. bnx2_enable_nvram_access(bp);
  3737. cmd_flags = BNX2_NVM_COMMAND_FIRST;
  3738. if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
  3739. int j;
  3740. /* Read the whole page into the buffer
  3741. * (non-buffered flash only) */
  3742. for (j = 0; j < bp->flash_info->page_size; j += 4) {
  3743. if (j == (bp->flash_info->page_size - 4)) {
  3744. cmd_flags |= BNX2_NVM_COMMAND_LAST;
  3745. }
  3746. rc = bnx2_nvram_read_dword(bp,
  3747. page_start + j,
  3748. &flash_buffer[j],
  3749. cmd_flags);
  3750. if (rc)
  3751. goto nvram_write_end;
  3752. cmd_flags = 0;
  3753. }
  3754. }
  3755. /* Enable writes to flash interface (unlock write-protect) */
  3756. if ((rc = bnx2_enable_nvram_write(bp)) != 0)
  3757. goto nvram_write_end;
  3758. /* Loop to write back the buffer data from page_start to
  3759. * data_start */
  3760. i = 0;
  3761. if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
  3762. /* Erase the page */
  3763. if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
  3764. goto nvram_write_end;
  3765. /* Re-enable the write again for the actual write */
  3766. bnx2_enable_nvram_write(bp);
  3767. for (addr = page_start; addr < data_start;
  3768. addr += 4, i += 4) {
  3769. rc = bnx2_nvram_write_dword(bp, addr,
  3770. &flash_buffer[i], cmd_flags);
  3771. if (rc != 0)
  3772. goto nvram_write_end;
  3773. cmd_flags = 0;
  3774. }
  3775. }
  3776. /* Loop to write the new data from data_start to data_end */
  3777. for (addr = data_start; addr < data_end; addr += 4, i += 4) {
  3778. if ((addr == page_end - 4) ||
  3779. ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
  3780. (addr == data_end - 4))) {
  3781. cmd_flags |= BNX2_NVM_COMMAND_LAST;
  3782. }
  3783. rc = bnx2_nvram_write_dword(bp, addr, buf,
  3784. cmd_flags);
  3785. if (rc != 0)
  3786. goto nvram_write_end;
  3787. cmd_flags = 0;
  3788. buf += 4;
  3789. }
  3790. /* Loop to write back the buffer data from data_end
  3791. * to page_end */
  3792. if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
  3793. for (addr = data_end; addr < page_end;
  3794. addr += 4, i += 4) {
  3795. if (addr == page_end-4) {
  3796. cmd_flags = BNX2_NVM_COMMAND_LAST;
  3797. }
  3798. rc = bnx2_nvram_write_dword(bp, addr,
  3799. &flash_buffer[i], cmd_flags);
  3800. if (rc != 0)
  3801. goto nvram_write_end;
  3802. cmd_flags = 0;
  3803. }
  3804. }
  3805. /* Disable writes to flash interface (lock write-protect) */
  3806. bnx2_disable_nvram_write(bp);
  3807. /* Disable access to flash interface */
  3808. bnx2_disable_nvram_access(bp);
  3809. bnx2_release_nvram_lock(bp);
  3810. /* Increment written */
  3811. written += data_end - data_start;
  3812. }
  3813. nvram_write_end:
  3814. kfree(flash_buffer);
  3815. kfree(align_buf);
  3816. return rc;
  3817. }
  3818. static void
  3819. bnx2_init_fw_cap(struct bnx2 *bp)
  3820. {
  3821. u32 val, sig = 0;
  3822. bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
  3823. bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
  3824. if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
  3825. bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
  3826. val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
  3827. if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
  3828. return;
  3829. if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
  3830. bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
  3831. sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
  3832. }
  3833. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  3834. (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
  3835. u32 link;
  3836. bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
  3837. link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
  3838. if (link & BNX2_LINK_STATUS_SERDES_LINK)
  3839. bp->phy_port = PORT_FIBRE;
  3840. else
  3841. bp->phy_port = PORT_TP;
  3842. sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
  3843. BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
  3844. }
  3845. if (netif_running(bp->dev) && sig)
  3846. bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
  3847. }
  3848. static void
  3849. bnx2_setup_msix_tbl(struct bnx2 *bp)
  3850. {
  3851. REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
  3852. REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
  3853. REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
  3854. }
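/* Soft-reset the chip: quiesce DMA, synchronize with the firmware and
 * leave a driver reset signature, issue the core reset (MISC_COMMAND
 * on the 5709, PCICFG_MISC_CONFIG on older chips), verify the byte
 * swapping configuration, wait for firmware init to finish and reapply
 * the workarounds that a reset clears (5706 A0 rbuf, MSI-X table).
 */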
  3855. static int
  3856. bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
  3857. {
  3858. u32 val;
  3859. int i, rc = 0;
  3860. u8 old_port;
  3861. /* Wait for the current PCI transaction to complete before
  3862. * issuing a reset. */
  3863. REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
  3864. BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
  3865. BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
  3866. BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
  3867. BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
  3868. val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
  3869. udelay(5);
  3870. /* Wait for the firmware to tell us it is ok to issue a reset. */
  3871. bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
  3872. /* Deposit a driver reset signature so the firmware knows that
  3873. * this is a soft reset. */
  3874. bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
  3875. BNX2_DRV_RESET_SIGNATURE_MAGIC);
/* Do a dummy read to force the chip to complete all current transactions
 * before we issue a reset. */
  3878. val = REG_RD(bp, BNX2_MISC_ID);
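/* The 5709 resets through the MISC command register; older chips request a
 * core reset through PCICFG_MISC_CONFIG and must not be read back until the
 * reset has had time to settle. */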
  3879. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  3880. REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
  3881. REG_RD(bp, BNX2_MISC_COMMAND);
  3882. udelay(5);
  3883. val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
  3884. BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
  3885. pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
  3886. } else {
  3887. val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
  3888. BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
  3889. BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
  3890. /* Chip reset. */
  3891. REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
  3892. /* Reading back any register after chip reset will hang the
  3893. * bus on 5706 A0 and A1. The msleep below provides plenty
  3894. * of margin for write posting.
  3895. */
  3896. if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
  3897. (CHIP_ID(bp) == CHIP_ID_5706_A1))
  3898. msleep(20);
/* Reset takes approximately 30 usec */
  3900. for (i = 0; i < 10; i++) {
  3901. val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
  3902. if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
  3903. BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
  3904. break;
  3905. udelay(10);
  3906. }
  3907. if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
  3908. BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
  3909. pr_err("Chip reset did not complete\n");
  3910. return -EBUSY;
  3911. }
  3912. }
  3913. /* Make sure byte swapping is properly configured. */
  3914. val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
  3915. if (val != 0x01020304) {
  3916. pr_err("Chip not in correct endian mode\n");
  3917. return -ENODEV;
  3918. }
  3919. /* Wait for the firmware to finish its initialization. */
  3920. rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
  3921. if (rc)
  3922. return rc;
  3923. spin_lock_bh(&bp->phy_lock);
  3924. old_port = bp->phy_port;
  3925. bnx2_init_fw_cap(bp);
  3926. if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
  3927. old_port != bp->phy_port)
  3928. bnx2_set_default_remote_link(bp);
  3929. spin_unlock_bh(&bp->phy_lock);
  3930. if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
/* Adjust the voltage regulator two steps lower.  The default
 * value of this register is 0x0000000e. */
  3933. REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
  3934. /* Remove bad rbuf memory from the free pool. */
  3935. rc = bnx2_alloc_bad_rbuf(bp);
  3936. }
  3937. if (bp->flags & BNX2_FLAG_USING_MSIX) {
  3938. bnx2_setup_msix_tbl(bp);
/* Prevent MSIX table reads and writes from timing out */
  3940. REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
  3941. BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
  3942. }
  3943. return rc;
  3944. }
  3945. static int
  3946. bnx2_init_chip(struct bnx2 *bp)
  3947. {
  3948. u32 val, mtu;
  3949. int rc, i;
  3950. /* Make sure the interrupt is not active. */
  3951. REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
  3952. val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
  3953. BNX2_DMA_CONFIG_DATA_WORD_SWAP |
  3954. #ifdef __BIG_ENDIAN
  3955. BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
  3956. #endif
  3957. BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
  3958. DMA_READ_CHANS << 12 |
  3959. DMA_WRITE_CHANS << 16;
  3960. val |= (0x2 << 20) | (1 << 11);
  3961. if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
  3962. val |= (1 << 23);
  3963. if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
  3964. (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
  3965. val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
  3966. REG_WR(bp, BNX2_DMA_CONFIG, val);
  3967. if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
  3968. val = REG_RD(bp, BNX2_TDMA_CONFIG);
  3969. val |= BNX2_TDMA_CONFIG_ONE_DMA;
  3970. REG_WR(bp, BNX2_TDMA_CONFIG, val);
  3971. }
  3972. if (bp->flags & BNX2_FLAG_PCIX) {
  3973. u16 val16;
  3974. pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
  3975. &val16);
  3976. pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
  3977. val16 & ~PCI_X_CMD_ERO);
  3978. }
  3979. REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
  3980. BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
  3981. BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
  3982. BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
  3983. /* Initialize context mapping and zero out the quick contexts. The
  3984. * context block must have already been enabled. */
  3985. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  3986. rc = bnx2_init_5709_context(bp);
  3987. if (rc)
  3988. return rc;
  3989. } else
  3990. bnx2_init_context(bp);
  3991. if ((rc = bnx2_init_cpus(bp)) != 0)
  3992. return rc;
  3993. bnx2_init_nvram(bp);
  3994. bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
  3995. val = REG_RD(bp, BNX2_MQ_CONFIG);
  3996. val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
  3997. val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
  3998. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  3999. val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
  4000. if (CHIP_REV(bp) == CHIP_REV_Ax)
  4001. val |= BNX2_MQ_CONFIG_HALT_DIS;
  4002. }
  4003. REG_WR(bp, BNX2_MQ_CONFIG, val);
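/* Both window registers get the offset just past the last kernel context
 * mailbox; judging by the register names, the kernel mailbox window ends
 * there and the kernel-bypass window begins there. */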
  4004. val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
  4005. REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
  4006. REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
  4007. val = (BCM_PAGE_BITS - 8) << 24;
  4008. REG_WR(bp, BNX2_RV2P_CONFIG, val);
  4009. /* Configure page size. */
  4010. val = REG_RD(bp, BNX2_TBDR_CONFIG);
  4011. val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
  4012. val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
  4013. REG_WR(bp, BNX2_TBDR_CONFIG, val);
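/* Derive the EMAC transmit backoff seed from the MAC address, the usual way
 * to keep multiple ports from backing off in lockstep. */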
  4014. val = bp->mac_addr[0] +
  4015. (bp->mac_addr[1] << 8) +
  4016. (bp->mac_addr[2] << 16) +
  4017. bp->mac_addr[3] +
  4018. (bp->mac_addr[4] << 8) +
  4019. (bp->mac_addr[5] << 16);
  4020. REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
  4021. /* Program the MTU. Also include 4 bytes for CRC32. */
  4022. mtu = bp->dev->mtu;
  4023. val = mtu + ETH_HLEN + ETH_FCS_LEN;
  4024. if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
  4025. val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
  4026. REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
  4027. if (mtu < 1500)
  4028. mtu = 1500;
  4029. bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
  4030. bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
  4031. bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
  4032. memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
  4033. for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
  4034. bp->bnx2_napi[i].last_status_idx = 0;
  4035. bp->idle_chk_status_idx = 0xffff;
  4036. bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
  4037. /* Set up how to generate a link change interrupt. */
  4038. REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
  4039. REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
  4040. (u64) bp->status_blk_mapping & 0xffffffff);
  4041. REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
  4042. REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
  4043. (u64) bp->stats_blk_mapping & 0xffffffff);
  4044. REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
  4045. (u64) bp->stats_blk_mapping >> 32);
  4046. REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
  4047. (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
  4048. REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
  4049. (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
  4050. REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
  4051. (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
  4052. REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
  4053. REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
  4054. REG_WR(bp, BNX2_HC_COM_TICKS,
  4055. (bp->com_ticks_int << 16) | bp->com_ticks);
  4056. REG_WR(bp, BNX2_HC_CMD_TICKS,
  4057. (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
  4058. if (bp->flags & BNX2_FLAG_BROKEN_STATS)
  4059. REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
  4060. else
  4061. REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
  4062. REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
  4063. if (CHIP_ID(bp) == CHIP_ID_5706_A1)
  4064. val = BNX2_HC_CONFIG_COLLECT_STATS;
  4065. else {
  4066. val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
  4067. BNX2_HC_CONFIG_COLLECT_STATS;
  4068. }
  4069. if (bp->flags & BNX2_FLAG_USING_MSIX) {
  4070. REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
  4071. BNX2_HC_MSIX_BIT_VECTOR_VAL);
  4072. val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
  4073. }
  4074. if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
  4075. val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
  4076. REG_WR(bp, BNX2_HC_CONFIG, val);
  4077. for (i = 1; i < bp->irq_nvecs; i++) {
  4078. u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
  4079. BNX2_HC_SB_CONFIG_1;
  4080. REG_WR(bp, base,
  4081. BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
  4082. BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
  4083. BNX2_HC_SB_CONFIG_1_ONE_SHOT);
  4084. REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
  4085. (bp->tx_quick_cons_trip_int << 16) |
  4086. bp->tx_quick_cons_trip);
  4087. REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
  4088. (bp->tx_ticks_int << 16) | bp->tx_ticks);
  4089. REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
  4090. (bp->rx_quick_cons_trip_int << 16) |
  4091. bp->rx_quick_cons_trip);
  4092. REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
  4093. (bp->rx_ticks_int << 16) | bp->rx_ticks);
  4094. }
  4095. /* Clear internal stats counters. */
  4096. REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
  4097. REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
  4098. /* Initialize the receive filter. */
  4099. bnx2_set_rx_mode(bp->dev);
  4100. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  4101. val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
  4102. val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
  4103. REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
  4104. }
  4105. rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
  4106. 1, 0);
  4107. REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
  4108. REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
  4109. udelay(20);
  4110. bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
  4111. return rc;
  4112. }
  4113. static void
  4114. bnx2_clear_ring_states(struct bnx2 *bp)
  4115. {
  4116. struct bnx2_napi *bnapi;
  4117. struct bnx2_tx_ring_info *txr;
  4118. struct bnx2_rx_ring_info *rxr;
  4119. int i;
  4120. for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
  4121. bnapi = &bp->bnx2_napi[i];
  4122. txr = &bnapi->tx_ring;
  4123. rxr = &bnapi->rx_ring;
  4124. txr->tx_cons = 0;
  4125. txr->hw_tx_cons = 0;
  4126. rxr->rx_prod_bseq = 0;
  4127. rxr->rx_prod = 0;
  4128. rxr->rx_cons = 0;
  4129. rxr->rx_pg_prod = 0;
  4130. rxr->rx_pg_cons = 0;
  4131. }
  4132. }
  4133. static void
  4134. bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
  4135. {
  4136. u32 val, offset0, offset1, offset2, offset3;
  4137. u32 cid_addr = GET_CID_ADDR(cid);
  4138. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  4139. offset0 = BNX2_L2CTX_TYPE_XI;
  4140. offset1 = BNX2_L2CTX_CMD_TYPE_XI;
  4141. offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
  4142. offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
  4143. } else {
  4144. offset0 = BNX2_L2CTX_TYPE;
  4145. offset1 = BNX2_L2CTX_CMD_TYPE;
  4146. offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
  4147. offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
  4148. }
  4149. val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
  4150. bnx2_ctx_wr(bp, cid_addr, offset0, val);
  4151. val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
  4152. bnx2_ctx_wr(bp, cid_addr, offset1, val);
  4153. val = (u64) txr->tx_desc_mapping >> 32;
  4154. bnx2_ctx_wr(bp, cid_addr, offset2, val);
  4155. val = (u64) txr->tx_desc_mapping & 0xffffffff;
  4156. bnx2_ctx_wr(bp, cid_addr, offset3, val);
  4157. }
  4158. static void
  4159. bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
  4160. {
  4161. struct tx_bd *txbd;
  4162. u32 cid = TX_CID;
  4163. struct bnx2_napi *bnapi;
  4164. struct bnx2_tx_ring_info *txr;
  4165. bnapi = &bp->bnx2_napi[ring_num];
  4166. txr = &bnapi->tx_ring;
  4167. if (ring_num == 0)
  4168. cid = TX_CID;
  4169. else
  4170. cid = TX_TSS_CID + ring_num - 1;
  4171. bp->tx_wake_thresh = bp->tx_ring_size / 2;
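/* The last BD in the tx ring points back at the first, making the ring
 * circular in hardware. */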
  4172. txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
  4173. txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
  4174. txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
  4175. txr->tx_prod = 0;
  4176. txr->tx_prod_bseq = 0;
  4177. txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
  4178. txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
  4179. bnx2_init_tx_context(bp, cid, txr);
  4180. }
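/* Set the buffer size and flags on each rx BD, and use the last BD of every
 * page to chain to the next page; the final page wraps back to the first. */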
  4181. static void
  4182. bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
  4183. int num_rings)
  4184. {
  4185. int i;
  4186. struct rx_bd *rxbd;
  4187. for (i = 0; i < num_rings; i++) {
  4188. int j;
  4189. rxbd = &rx_ring[i][0];
  4190. for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
  4191. rxbd->rx_bd_len = buf_size;
  4192. rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
  4193. }
  4194. if (i == (num_rings - 1))
  4195. j = 0;
  4196. else
  4197. j = i + 1;
  4198. rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
  4199. rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
  4200. }
  4201. }
  4202. static void
  4203. bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
  4204. {
  4205. int i;
  4206. u16 prod, ring_prod;
  4207. u32 cid, rx_cid_addr, val;
  4208. struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
  4209. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  4210. if (ring_num == 0)
  4211. cid = RX_CID;
  4212. else
  4213. cid = RX_RSS_CID + ring_num - 1;
  4214. rx_cid_addr = GET_CID_ADDR(cid);
  4215. bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
  4216. bp->rx_buf_use_size, bp->rx_max_ring);
  4217. bnx2_init_rx_context(bp, cid);
  4218. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  4219. val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
  4220. REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
  4221. }
  4222. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
  4223. if (bp->rx_pg_ring_size) {
  4224. bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
  4225. rxr->rx_pg_desc_mapping,
  4226. PAGE_SIZE, bp->rx_max_pg_ring);
  4227. val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
  4228. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
  4229. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
  4230. BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
  4231. val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
  4232. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
  4233. val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
  4234. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
  4235. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  4236. REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
  4237. }
  4238. val = (u64) rxr->rx_desc_mapping[0] >> 32;
  4239. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
  4240. val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
  4241. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
  4242. ring_prod = prod = rxr->rx_pg_prod;
  4243. for (i = 0; i < bp->rx_pg_ring_size; i++) {
  4244. if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
  4245. netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
  4246. ring_num, i, bp->rx_pg_ring_size);
  4247. break;
  4248. }
  4249. prod = NEXT_RX_BD(prod);
  4250. ring_prod = RX_PG_RING_IDX(prod);
  4251. }
  4252. rxr->rx_pg_prod = prod;
  4253. ring_prod = prod = rxr->rx_prod;
  4254. for (i = 0; i < bp->rx_ring_size; i++) {
  4255. if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
  4256. netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
  4257. ring_num, i, bp->rx_ring_size);
  4258. break;
  4259. }
  4260. prod = NEXT_RX_BD(prod);
  4261. ring_prod = RX_RING_IDX(prod);
  4262. }
  4263. rxr->rx_prod = prod;
  4264. rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
  4265. rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
  4266. rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
  4267. REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
  4268. REG_WR16(bp, rxr->rx_bidx_addr, prod);
  4269. REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
  4270. }
  4271. static void
  4272. bnx2_init_all_rings(struct bnx2 *bp)
  4273. {
  4274. int i;
  4275. u32 val;
  4276. bnx2_clear_ring_states(bp);
  4277. REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
  4278. for (i = 0; i < bp->num_tx_rings; i++)
  4279. bnx2_init_tx_ring(bp, i);
  4280. if (bp->num_tx_rings > 1)
  4281. REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
  4282. (TX_TSS_CID << 7));
  4283. REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
  4284. bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
  4285. for (i = 0; i < bp->num_rx_rings; i++)
  4286. bnx2_init_rx_ring(bp, i);
  4287. if (bp->num_rx_rings > 1) {
  4288. u32 tbl_32;
  4289. u8 *tbl = (u8 *) &tbl_32;
  4290. bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
  4291. BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
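/* Build the RSS indirection table in RXP scratch memory: each 32-bit word
 * packs four 8-bit ring indices, written once every fourth entry.  The
 * indices spread flows across the num_rx_rings - 1 RSS rings; ring 0 stays
 * the default ring. */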
  4292. for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
  4293. tbl[i % 4] = i % (bp->num_rx_rings - 1);
  4294. if ((i % 4) == 3)
  4295. bnx2_reg_wr_ind(bp,
  4296. BNX2_RXP_SCRATCH_RSS_TBL + i,
  4297. cpu_to_be32(tbl_32));
  4298. }
  4299. val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
  4300. BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
  4301. REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
  4302. }
  4303. }
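/* Round the number of BD pages needed for ring_size descriptors up to the
 * next power of two; max_size is the maximum number of ring pages supported. */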
  4304. static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
  4305. {
  4306. u32 max, num_rings = 1;
  4307. while (ring_size > MAX_RX_DESC_CNT) {
  4308. ring_size -= MAX_RX_DESC_CNT;
  4309. num_rings++;
  4310. }
  4311. /* round to next power of 2 */
  4312. max = max_size;
  4313. while ((max & num_rings) == 0)
  4314. max >>= 1;
  4315. if (num_rings != max)
  4316. max <<= 1;
  4317. return max;
  4318. }
  4319. static void
  4320. bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
  4321. {
  4322. u32 rx_size, rx_space, jumbo_size;
  4323. /* 8 for CRC and VLAN */
  4324. rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
  4325. rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
  4326. sizeof(struct skb_shared_info);
  4327. bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
  4328. bp->rx_pg_ring_size = 0;
  4329. bp->rx_max_pg_ring = 0;
  4330. bp->rx_max_pg_ring_idx = 0;
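/* If a full-size buffer plus skb overhead no longer fits in one page, switch
 * to split rx: the first buffer holds only the header portion (up to the copy
 * threshold) and the rest of the frame lands in the page ring. */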
  4331. if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
  4332. int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
  4333. jumbo_size = size * pages;
  4334. if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
  4335. jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
  4336. bp->rx_pg_ring_size = jumbo_size;
  4337. bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
  4338. MAX_RX_PG_RINGS);
  4339. bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
  4340. rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
  4341. bp->rx_copy_thresh = 0;
  4342. }
  4343. bp->rx_buf_use_size = rx_size;
  4344. /* hw alignment */
  4345. bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
  4346. bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
  4347. bp->rx_ring_size = size;
  4348. bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
  4349. bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
  4350. }
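/* Unmap and free any skbs still sitting on the tx rings: the head buffer is
 * unmapped first, then each fragment page that follows it. */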
  4351. static void
  4352. bnx2_free_tx_skbs(struct bnx2 *bp)
  4353. {
  4354. int i;
  4355. for (i = 0; i < bp->num_tx_rings; i++) {
  4356. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  4357. struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
  4358. int j;
  4359. if (txr->tx_buf_ring == NULL)
  4360. continue;
  4361. for (j = 0; j < TX_DESC_CNT; ) {
  4362. struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
  4363. struct sk_buff *skb = tx_buf->skb;
  4364. int k, last;
  4365. if (skb == NULL) {
  4366. j++;
  4367. continue;
  4368. }
  4369. dma_unmap_single(&bp->pdev->dev,
  4370. dma_unmap_addr(tx_buf, mapping),
  4371. skb_headlen(skb),
  4372. PCI_DMA_TODEVICE);
  4373. tx_buf->skb = NULL;
  4374. last = tx_buf->nr_frags;
  4375. j++;
  4376. for (k = 0; k < last; k++, j++) {
  4377. tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
  4378. dma_unmap_page(&bp->pdev->dev,
  4379. dma_unmap_addr(tx_buf, mapping),
  4380. skb_shinfo(skb)->frags[k].size,
  4381. PCI_DMA_TODEVICE);
  4382. }
  4383. dev_kfree_skb(skb);
  4384. }
  4385. }
  4386. }
  4387. static void
  4388. bnx2_free_rx_skbs(struct bnx2 *bp)
  4389. {
  4390. int i;
  4391. for (i = 0; i < bp->num_rx_rings; i++) {
  4392. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  4393. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  4394. int j;
  4395. if (rxr->rx_buf_ring == NULL)
  4396. return;
  4397. for (j = 0; j < bp->rx_max_ring_idx; j++) {
  4398. struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
  4399. struct sk_buff *skb = rx_buf->skb;
  4400. if (skb == NULL)
  4401. continue;
  4402. dma_unmap_single(&bp->pdev->dev,
  4403. dma_unmap_addr(rx_buf, mapping),
  4404. bp->rx_buf_use_size,
  4405. PCI_DMA_FROMDEVICE);
  4406. rx_buf->skb = NULL;
  4407. dev_kfree_skb(skb);
  4408. }
  4409. for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
  4410. bnx2_free_rx_page(bp, rxr, j);
  4411. }
  4412. }
  4413. static void
  4414. bnx2_free_skbs(struct bnx2 *bp)
  4415. {
  4416. bnx2_free_tx_skbs(bp);
  4417. bnx2_free_rx_skbs(bp);
  4418. }
  4419. static int
  4420. bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
  4421. {
  4422. int rc;
  4423. rc = bnx2_reset_chip(bp, reset_code);
  4424. bnx2_free_skbs(bp);
  4425. if (rc)
  4426. return rc;
  4427. if ((rc = bnx2_init_chip(bp)) != 0)
  4428. return rc;
  4429. bnx2_init_all_rings(bp);
  4430. return 0;
  4431. }
  4432. static int
  4433. bnx2_init_nic(struct bnx2 *bp, int reset_phy)
  4434. {
  4435. int rc;
  4436. if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
  4437. return rc;
  4438. spin_lock_bh(&bp->phy_lock);
  4439. bnx2_init_phy(bp, reset_phy);
  4440. bnx2_set_link(bp);
  4441. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  4442. bnx2_remote_phy_event(bp);
  4443. spin_unlock_bh(&bp->phy_lock);
  4444. return 0;
  4445. }
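/* Pick the unload/suspend reset code that tells the firmware whether
 * wake-on-LAN should stay armed while the chip is down, then reset the chip. */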
  4446. static int
  4447. bnx2_shutdown_chip(struct bnx2 *bp)
  4448. {
  4449. u32 reset_code;
  4450. if (bp->flags & BNX2_FLAG_NO_WOL)
  4451. reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
  4452. else if (bp->wol)
  4453. reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
  4454. else
  4455. reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
  4456. return bnx2_reset_chip(bp, reset_code);
  4457. }
  4458. static int
  4459. bnx2_test_registers(struct bnx2 *bp)
  4460. {
  4461. int ret;
  4462. int i, is_5709;
  4463. static const struct {
  4464. u16 offset;
  4465. u16 flags;
  4466. #define BNX2_FL_NOT_5709 1
  4467. u32 rw_mask;
  4468. u32 ro_mask;
  4469. } reg_tbl[] = {
  4470. { 0x006c, 0, 0x00000000, 0x0000003f },
  4471. { 0x0090, 0, 0xffffffff, 0x00000000 },
  4472. { 0x0094, 0, 0x00000000, 0x00000000 },
  4473. { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
  4474. { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
  4475. { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
  4476. { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
  4477. { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
  4478. { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
  4479. { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
  4480. { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
  4481. { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
  4482. { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
  4483. { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
  4484. { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
  4485. { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
  4486. { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
  4487. { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
  4488. { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
  4489. { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
  4490. { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
  4491. { 0x1000, 0, 0x00000000, 0x00000001 },
  4492. { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
  4493. { 0x1408, 0, 0x01c00800, 0x00000000 },
  4494. { 0x149c, 0, 0x8000ffff, 0x00000000 },
  4495. { 0x14a8, 0, 0x00000000, 0x000001ff },
  4496. { 0x14ac, 0, 0x0fffffff, 0x10000000 },
  4497. { 0x14b0, 0, 0x00000002, 0x00000001 },
  4498. { 0x14b8, 0, 0x00000000, 0x00000000 },
  4499. { 0x14c0, 0, 0x00000000, 0x00000009 },
  4500. { 0x14c4, 0, 0x00003fff, 0x00000000 },
  4501. { 0x14cc, 0, 0x00000000, 0x00000001 },
  4502. { 0x14d0, 0, 0xffffffff, 0x00000000 },
  4503. { 0x1800, 0, 0x00000000, 0x00000001 },
  4504. { 0x1804, 0, 0x00000000, 0x00000003 },
  4505. { 0x2800, 0, 0x00000000, 0x00000001 },
  4506. { 0x2804, 0, 0x00000000, 0x00003f01 },
  4507. { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
  4508. { 0x2810, 0, 0xffff0000, 0x00000000 },
  4509. { 0x2814, 0, 0xffff0000, 0x00000000 },
  4510. { 0x2818, 0, 0xffff0000, 0x00000000 },
  4511. { 0x281c, 0, 0xffff0000, 0x00000000 },
  4512. { 0x2834, 0, 0xffffffff, 0x00000000 },
  4513. { 0x2840, 0, 0x00000000, 0xffffffff },
  4514. { 0x2844, 0, 0x00000000, 0xffffffff },
  4515. { 0x2848, 0, 0xffffffff, 0x00000000 },
  4516. { 0x284c, 0, 0xf800f800, 0x07ff07ff },
  4517. { 0x2c00, 0, 0x00000000, 0x00000011 },
  4518. { 0x2c04, 0, 0x00000000, 0x00030007 },
  4519. { 0x3c00, 0, 0x00000000, 0x00000001 },
  4520. { 0x3c04, 0, 0x00000000, 0x00070000 },
  4521. { 0x3c08, 0, 0x00007f71, 0x07f00000 },
  4522. { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
  4523. { 0x3c10, 0, 0xffffffff, 0x00000000 },
  4524. { 0x3c14, 0, 0x00000000, 0xffffffff },
  4525. { 0x3c18, 0, 0x00000000, 0xffffffff },
  4526. { 0x3c1c, 0, 0xfffff000, 0x00000000 },
  4527. { 0x3c20, 0, 0xffffff00, 0x00000000 },
  4528. { 0x5004, 0, 0x00000000, 0x0000007f },
  4529. { 0x5008, 0, 0x0f0007ff, 0x00000000 },
  4530. { 0x5c00, 0, 0x00000000, 0x00000001 },
  4531. { 0x5c04, 0, 0x00000000, 0x0003000f },
  4532. { 0x5c08, 0, 0x00000003, 0x00000000 },
  4533. { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
  4534. { 0x5c10, 0, 0x00000000, 0xffffffff },
  4535. { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
  4536. { 0x5c84, 0, 0x00000000, 0x0000f333 },
  4537. { 0x5c88, 0, 0x00000000, 0x00077373 },
  4538. { 0x5c8c, 0, 0x00000000, 0x0007f737 },
  4539. { 0x6808, 0, 0x0000ff7f, 0x00000000 },
  4540. { 0x680c, 0, 0xffffffff, 0x00000000 },
  4541. { 0x6810, 0, 0xffffffff, 0x00000000 },
  4542. { 0x6814, 0, 0xffffffff, 0x00000000 },
  4543. { 0x6818, 0, 0xffffffff, 0x00000000 },
  4544. { 0x681c, 0, 0xffffffff, 0x00000000 },
  4545. { 0x6820, 0, 0x00ff00ff, 0x00000000 },
  4546. { 0x6824, 0, 0x00ff00ff, 0x00000000 },
  4547. { 0x6828, 0, 0x00ff00ff, 0x00000000 },
  4548. { 0x682c, 0, 0x03ff03ff, 0x00000000 },
  4549. { 0x6830, 0, 0x03ff03ff, 0x00000000 },
  4550. { 0x6834, 0, 0x03ff03ff, 0x00000000 },
  4551. { 0x6838, 0, 0x03ff03ff, 0x00000000 },
  4552. { 0x683c, 0, 0x0000ffff, 0x00000000 },
  4553. { 0x6840, 0, 0x00000ff0, 0x00000000 },
  4554. { 0x6844, 0, 0x00ffff00, 0x00000000 },
  4555. { 0x684c, 0, 0xffffffff, 0x00000000 },
  4556. { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
  4557. { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
  4558. { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
  4559. { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
  4560. { 0x6908, 0, 0x00000000, 0x0001ff0f },
  4561. { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
  4562. { 0xffff, 0, 0x00000000, 0x00000000 },
  4563. };
  4564. ret = 0;
  4565. is_5709 = 0;
  4566. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  4567. is_5709 = 1;
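/* For each register, write 0 and then all ones: read/write bits (rw_mask)
 * must reflect what was written, while read-only bits (ro_mask) must keep
 * their original value. */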
  4568. for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
  4569. u32 offset, rw_mask, ro_mask, save_val, val;
  4570. u16 flags = reg_tbl[i].flags;
  4571. if (is_5709 && (flags & BNX2_FL_NOT_5709))
  4572. continue;
  4573. offset = (u32) reg_tbl[i].offset;
  4574. rw_mask = reg_tbl[i].rw_mask;
  4575. ro_mask = reg_tbl[i].ro_mask;
  4576. save_val = readl(bp->regview + offset);
  4577. writel(0, bp->regview + offset);
  4578. val = readl(bp->regview + offset);
  4579. if ((val & rw_mask) != 0) {
  4580. goto reg_test_err;
  4581. }
  4582. if ((val & ro_mask) != (save_val & ro_mask)) {
  4583. goto reg_test_err;
  4584. }
  4585. writel(0xffffffff, bp->regview + offset);
  4586. val = readl(bp->regview + offset);
  4587. if ((val & rw_mask) != rw_mask) {
  4588. goto reg_test_err;
  4589. }
  4590. if ((val & ro_mask) != (save_val & ro_mask)) {
  4591. goto reg_test_err;
  4592. }
  4593. writel(save_val, bp->regview + offset);
  4594. continue;
  4595. reg_test_err:
  4596. writel(save_val, bp->regview + offset);
  4597. ret = -ENODEV;
  4598. break;
  4599. }
  4600. return ret;
  4601. }
  4602. static int
  4603. bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
  4604. {
  4605. static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
  4606. 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
  4607. int i;
  4608. for (i = 0; i < sizeof(test_pattern) / 4; i++) {
  4609. u32 offset;
  4610. for (offset = 0; offset < size; offset += 4) {
  4611. bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
  4612. if (bnx2_reg_rd_ind(bp, start + offset) !=
  4613. test_pattern[i]) {
  4614. return -ENODEV;
  4615. }
  4616. }
  4617. }
  4618. return 0;
  4619. }
  4620. static int
  4621. bnx2_test_memory(struct bnx2 *bp)
  4622. {
  4623. int ret = 0;
  4624. int i;
  4625. static struct mem_entry {
  4626. u32 offset;
  4627. u32 len;
  4628. } mem_tbl_5706[] = {
  4629. { 0x60000, 0x4000 },
  4630. { 0xa0000, 0x3000 },
  4631. { 0xe0000, 0x4000 },
  4632. { 0x120000, 0x4000 },
  4633. { 0x1a0000, 0x4000 },
  4634. { 0x160000, 0x4000 },
  4635. { 0xffffffff, 0 },
  4636. },
  4637. mem_tbl_5709[] = {
  4638. { 0x60000, 0x4000 },
  4639. { 0xa0000, 0x3000 },
  4640. { 0xe0000, 0x4000 },
  4641. { 0x120000, 0x4000 },
  4642. { 0x1a0000, 0x4000 },
  4643. { 0xffffffff, 0 },
  4644. };
  4645. struct mem_entry *mem_tbl;
  4646. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  4647. mem_tbl = mem_tbl_5709;
  4648. else
  4649. mem_tbl = mem_tbl_5706;
  4650. for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
  4651. if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
  4652. mem_tbl[i].len)) != 0) {
  4653. return ret;
  4654. }
  4655. }
  4656. return ret;
  4657. }
  4658. #define BNX2_MAC_LOOPBACK 0
  4659. #define BNX2_PHY_LOOPBACK 1
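/* Send one self-addressed test frame through MAC or PHY loopback and verify
 * that it comes back on rx ring 0 with the expected length and payload. */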
  4660. static int
  4661. bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
  4662. {
  4663. unsigned int pkt_size, num_pkts, i;
  4664. struct sk_buff *skb, *rx_skb;
  4665. unsigned char *packet;
  4666. u16 rx_start_idx, rx_idx;
  4667. dma_addr_t map;
  4668. struct tx_bd *txbd;
  4669. struct sw_bd *rx_buf;
  4670. struct l2_fhdr *rx_hdr;
  4671. int ret = -ENODEV;
  4672. struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
  4673. struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
  4674. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  4675. tx_napi = bnapi;
  4676. txr = &tx_napi->tx_ring;
  4677. rxr = &bnapi->rx_ring;
  4678. if (loopback_mode == BNX2_MAC_LOOPBACK) {
  4679. bp->loopback = MAC_LOOPBACK;
  4680. bnx2_set_mac_loopback(bp);
  4681. }
  4682. else if (loopback_mode == BNX2_PHY_LOOPBACK) {
  4683. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  4684. return 0;
  4685. bp->loopback = PHY_LOOPBACK;
  4686. bnx2_set_phy_loopback(bp);
  4687. }
  4688. else
  4689. return -EINVAL;
  4690. pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
  4691. skb = netdev_alloc_skb(bp->dev, pkt_size);
  4692. if (!skb)
  4693. return -ENOMEM;
  4694. packet = skb_put(skb, pkt_size);
  4695. memcpy(packet, bp->dev->dev_addr, 6);
  4696. memset(packet + 6, 0x0, 8);
  4697. for (i = 14; i < pkt_size; i++)
  4698. packet[i] = (unsigned char) (i & 0xff);
  4699. map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
  4700. PCI_DMA_TODEVICE);
  4701. if (dma_mapping_error(&bp->pdev->dev, map)) {
  4702. dev_kfree_skb(skb);
  4703. return -EIO;
  4704. }
  4705. REG_WR(bp, BNX2_HC_COMMAND,
  4706. bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
  4707. REG_RD(bp, BNX2_HC_COMMAND);
  4708. udelay(5);
  4709. rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
  4710. num_pkts = 0;
  4711. txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
  4712. txbd->tx_bd_haddr_hi = (u64) map >> 32;
  4713. txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
  4714. txbd->tx_bd_mss_nbytes = pkt_size;
  4715. txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
  4716. num_pkts++;
  4717. txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
  4718. txr->tx_prod_bseq += pkt_size;
  4719. REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
  4720. REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
  4721. udelay(100);
  4722. REG_WR(bp, BNX2_HC_COMMAND,
  4723. bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
  4724. REG_RD(bp, BNX2_HC_COMMAND);
  4725. udelay(5);
  4726. dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
  4727. dev_kfree_skb(skb);
  4728. if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
  4729. goto loopback_test_done;
  4730. rx_idx = bnx2_get_hw_rx_cons(bnapi);
  4731. if (rx_idx != rx_start_idx + num_pkts) {
  4732. goto loopback_test_done;
  4733. }
  4734. rx_buf = &rxr->rx_buf_ring[rx_start_idx];
  4735. rx_skb = rx_buf->skb;
  4736. rx_hdr = rx_buf->desc;
  4737. skb_reserve(rx_skb, BNX2_RX_OFFSET);
  4738. dma_sync_single_for_cpu(&bp->pdev->dev,
  4739. dma_unmap_addr(rx_buf, mapping),
  4740. bp->rx_buf_size, PCI_DMA_FROMDEVICE);
  4741. if (rx_hdr->l2_fhdr_status &
  4742. (L2_FHDR_ERRORS_BAD_CRC |
  4743. L2_FHDR_ERRORS_PHY_DECODE |
  4744. L2_FHDR_ERRORS_ALIGNMENT |
  4745. L2_FHDR_ERRORS_TOO_SHORT |
  4746. L2_FHDR_ERRORS_GIANT_FRAME)) {
  4747. goto loopback_test_done;
  4748. }
  4749. if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
  4750. goto loopback_test_done;
  4751. }
  4752. for (i = 14; i < pkt_size; i++) {
  4753. if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
  4754. goto loopback_test_done;
  4755. }
  4756. }
  4757. ret = 0;
  4758. loopback_test_done:
  4759. bp->loopback = 0;
  4760. return ret;
  4761. }
  4762. #define BNX2_MAC_LOOPBACK_FAILED 1
  4763. #define BNX2_PHY_LOOPBACK_FAILED 2
  4764. #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
  4765. BNX2_PHY_LOOPBACK_FAILED)
  4766. static int
  4767. bnx2_test_loopback(struct bnx2 *bp)
  4768. {
  4769. int rc = 0;
  4770. if (!netif_running(bp->dev))
  4771. return BNX2_LOOPBACK_FAILED;
  4772. bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
  4773. spin_lock_bh(&bp->phy_lock);
  4774. bnx2_init_phy(bp, 1);
  4775. spin_unlock_bh(&bp->phy_lock);
  4776. if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
  4777. rc |= BNX2_MAC_LOOPBACK_FAILED;
  4778. if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
  4779. rc |= BNX2_PHY_LOOPBACK_FAILED;
  4780. return rc;
  4781. }
  4782. #define NVRAM_SIZE 0x200
  4783. #define CRC32_RESIDUAL 0xdebb20e3
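/* The blocks checked below each end in their own CRC32; running the CRC over
 * a whole 0x100-byte block (data plus stored CRC) leaves the fixed residual
 * above when the block is intact. */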
  4784. static int
  4785. bnx2_test_nvram(struct bnx2 *bp)
  4786. {
  4787. __be32 buf[NVRAM_SIZE / 4];
  4788. u8 *data = (u8 *) buf;
  4789. int rc = 0;
  4790. u32 magic, csum;
  4791. if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
  4792. goto test_nvram_done;
  4793. magic = be32_to_cpu(buf[0]);
  4794. if (magic != 0x669955aa) {
  4795. rc = -ENODEV;
  4796. goto test_nvram_done;
  4797. }
  4798. if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
  4799. goto test_nvram_done;
  4800. csum = ether_crc_le(0x100, data);
  4801. if (csum != CRC32_RESIDUAL) {
  4802. rc = -ENODEV;
  4803. goto test_nvram_done;
  4804. }
  4805. csum = ether_crc_le(0x100, data + 0x100);
  4806. if (csum != CRC32_RESIDUAL) {
  4807. rc = -ENODEV;
  4808. }
  4809. test_nvram_done:
  4810. return rc;
  4811. }
  4812. static int
  4813. bnx2_test_link(struct bnx2 *bp)
  4814. {
  4815. u32 bmsr;
  4816. if (!netif_running(bp->dev))
  4817. return -ENODEV;
  4818. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
  4819. if (bp->link_up)
  4820. return 0;
  4821. return -ENODEV;
  4822. }
  4823. spin_lock_bh(&bp->phy_lock);
  4824. bnx2_enable_bmsr1(bp);
  4825. bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
  4826. bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
  4827. bnx2_disable_bmsr1(bp);
  4828. spin_unlock_bh(&bp->phy_lock);
  4829. if (bmsr & BMSR_LSTATUS) {
  4830. return 0;
  4831. }
  4832. return -ENODEV;
  4833. }
  4834. static int
  4835. bnx2_test_intr(struct bnx2 *bp)
  4836. {
  4837. int i;
  4838. u16 status_idx;
  4839. if (!netif_running(bp->dev))
  4840. return -ENODEV;
  4841. status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
  4842. /* This register is not touched during run-time. */
  4843. REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
  4844. REG_RD(bp, BNX2_HC_COMMAND);
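/* Poll for up to ~100 ms for the status block index to advance, which shows
 * that the forced coalesce interrupt actually reached the host. */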
  4845. for (i = 0; i < 10; i++) {
  4846. if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
  4847. status_idx) {
  4848. break;
  4849. }
  4850. msleep_interruptible(10);
  4851. }
  4852. if (i < 10)
  4853. return 0;
  4854. return -ENODEV;
  4855. }
/* Determine link state for parallel detection. */
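/* A link is assumed only when signal detect is up, the AN debug register
 * reports neither loss of sync nor an invalid RUDI, and no IEEE CONFIG code
 * words are being received. */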
  4857. static int
  4858. bnx2_5706_serdes_has_link(struct bnx2 *bp)
  4859. {
  4860. u32 mode_ctl, an_dbg, exp;
  4861. if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
  4862. return 0;
  4863. bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
  4864. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
  4865. if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
  4866. return 0;
  4867. bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
  4868. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
  4869. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
  4870. if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
  4871. return 0;
  4872. bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
  4873. bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
  4874. bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
  4875. if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
  4876. return 0;
  4877. return 1;
  4878. }
  4879. static void
  4880. bnx2_5706_serdes_timer(struct bnx2 *bp)
  4881. {
  4882. int check_link = 1;
  4883. spin_lock(&bp->phy_lock);
  4884. if (bp->serdes_an_pending) {
  4885. bp->serdes_an_pending--;
  4886. check_link = 0;
  4887. } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
  4888. u32 bmcr;
  4889. bp->current_interval = BNX2_TIMER_INTERVAL;
  4890. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  4891. if (bmcr & BMCR_ANENABLE) {
  4892. if (bnx2_5706_serdes_has_link(bp)) {
  4893. bmcr &= ~BMCR_ANENABLE;
  4894. bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
  4895. bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
  4896. bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
  4897. }
  4898. }
  4899. }
  4900. else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
  4901. (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
  4902. u32 phy2;
  4903. bnx2_write_phy(bp, 0x17, 0x0f01);
  4904. bnx2_read_phy(bp, 0x15, &phy2);
  4905. if (phy2 & 0x20) {
  4906. u32 bmcr;
  4907. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  4908. bmcr |= BMCR_ANENABLE;
  4909. bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
  4910. bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
  4911. }
  4912. } else
  4913. bp->current_interval = BNX2_TIMER_INTERVAL;
  4914. if (check_link) {
  4915. u32 val;
  4916. bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
  4917. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
  4918. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
  4919. if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
  4920. if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
  4921. bnx2_5706s_force_link_dn(bp, 1);
  4922. bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
  4923. } else
  4924. bnx2_set_link(bp);
  4925. } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
  4926. bnx2_set_link(bp);
  4927. }
  4928. spin_unlock(&bp->phy_lock);
  4929. }
  4930. static void
  4931. bnx2_5708_serdes_timer(struct bnx2 *bp)
  4932. {
  4933. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  4934. return;
  4935. if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
  4936. bp->serdes_an_pending = 0;
  4937. return;
  4938. }
  4939. spin_lock(&bp->phy_lock);
  4940. if (bp->serdes_an_pending)
  4941. bp->serdes_an_pending--;
  4942. else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
  4943. u32 bmcr;
  4944. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  4945. if (bmcr & BMCR_ANENABLE) {
  4946. bnx2_enable_forced_2g5(bp);
  4947. bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
  4948. } else {
  4949. bnx2_disable_forced_2g5(bp);
  4950. bp->serdes_an_pending = 2;
  4951. bp->current_interval = BNX2_TIMER_INTERVAL;
  4952. }
  4953. } else
  4954. bp->current_interval = BNX2_TIMER_INTERVAL;
  4955. spin_unlock(&bp->phy_lock);
  4956. }
  4957. static void
  4958. bnx2_timer(unsigned long data)
  4959. {
  4960. struct bnx2 *bp = (struct bnx2 *) data;
  4961. if (!netif_running(bp->dev))
  4962. return;
  4963. if (atomic_read(&bp->intr_sem) != 0)
  4964. goto bnx2_restart_timer;
  4965. if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
  4966. BNX2_FLAG_USING_MSI)
  4967. bnx2_chk_missed_msi(bp);
  4968. bnx2_send_heart_beat(bp);
  4969. bp->stats_blk->stat_FwRxDrop =
  4970. bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
/* Work around occasionally corrupted counters */
  4972. if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
  4973. REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
  4974. BNX2_HC_COMMAND_STATS_NOW);
  4975. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  4976. if (CHIP_NUM(bp) == CHIP_NUM_5706)
  4977. bnx2_5706_serdes_timer(bp);
  4978. else
  4979. bnx2_5708_serdes_timer(bp);
  4980. }
  4981. bnx2_restart_timer:
  4982. mod_timer(&bp->timer, jiffies + bp->current_interval);
  4983. }
  4984. static int
  4985. bnx2_request_irq(struct bnx2 *bp)
  4986. {
  4987. unsigned long flags;
  4988. struct bnx2_irq *irq;
  4989. int rc = 0, i;
  4990. if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
  4991. flags = 0;
  4992. else
  4993. flags = IRQF_SHARED;
  4994. for (i = 0; i < bp->irq_nvecs; i++) {
  4995. irq = &bp->irq_tbl[i];
  4996. rc = request_irq(irq->vector, irq->handler, flags, irq->name,
  4997. &bp->bnx2_napi[i]);
  4998. if (rc)
  4999. break;
  5000. irq->requested = 1;
  5001. }
  5002. return rc;
  5003. }
  5004. static void
  5005. bnx2_free_irq(struct bnx2 *bp)
  5006. {
  5007. struct bnx2_irq *irq;
  5008. int i;
  5009. for (i = 0; i < bp->irq_nvecs; i++) {
  5010. irq = &bp->irq_tbl[i];
  5011. if (irq->requested)
  5012. free_irq(irq->vector, &bp->bnx2_napi[i]);
  5013. irq->requested = 0;
  5014. }
  5015. if (bp->flags & BNX2_FLAG_USING_MSI)
  5016. pci_disable_msi(bp->pdev);
  5017. else if (bp->flags & BNX2_FLAG_USING_MSIX)
  5018. pci_disable_msix(bp->pdev);
  5019. bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
  5020. }
  5021. static void
  5022. bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
  5023. {
  5024. int i, rc;
  5025. struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
  5026. struct net_device *dev = bp->dev;
  5027. const int len = sizeof(bp->irq_tbl[0].name);
  5028. bnx2_setup_msix_tbl(bp);
  5029. REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
  5030. REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
  5031. REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
/* Need to flush the previous three writes to ensure MSI-X
 * is set up properly */
  5034. REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
  5035. for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
  5036. msix_ent[i].entry = i;
  5037. msix_ent[i].vector = 0;
  5038. }
  5039. rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
  5040. if (rc != 0)
  5041. return;
  5042. bp->irq_nvecs = msix_vecs;
  5043. bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
  5044. for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
  5045. bp->irq_tbl[i].vector = msix_ent[i].vector;
  5046. snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
  5047. bp->irq_tbl[i].handler = bnx2_msi_1shot;
  5048. }
  5049. }
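/* Choose the interrupt mode: try MSI-X with up to one vector per online CPU
 * plus one, fall back to MSI, and finally to shared INTx.  The number of
 * tx/rx rings follows the number of vectors obtained. */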
  5050. static void
  5051. bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
  5052. {
  5053. int cpus = num_online_cpus();
  5054. int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
  5055. bp->irq_tbl[0].handler = bnx2_interrupt;
  5056. strcpy(bp->irq_tbl[0].name, bp->dev->name);
  5057. bp->irq_nvecs = 1;
  5058. bp->irq_tbl[0].vector = bp->pdev->irq;
  5059. if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
  5060. bnx2_enable_msix(bp, msix_vecs);
  5061. if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
  5062. !(bp->flags & BNX2_FLAG_USING_MSIX)) {
  5063. if (pci_enable_msi(bp->pdev) == 0) {
  5064. bp->flags |= BNX2_FLAG_USING_MSI;
  5065. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  5066. bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
  5067. bp->irq_tbl[0].handler = bnx2_msi_1shot;
  5068. } else
  5069. bp->irq_tbl[0].handler = bnx2_msi;
  5070. bp->irq_tbl[0].vector = bp->pdev->irq;
  5071. }
  5072. }
  5073. bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
  5074. bp->dev->real_num_tx_queues = bp->num_tx_rings;
  5075. bp->num_rx_rings = bp->irq_nvecs;
  5076. }
  5077. /* Called with rtnl_lock */
  5078. static int
  5079. bnx2_open(struct net_device *dev)
  5080. {
  5081. struct bnx2 *bp = netdev_priv(dev);
  5082. int rc;
  5083. netif_carrier_off(dev);
  5084. bnx2_set_power_state(bp, PCI_D0);
  5085. bnx2_disable_int(bp);
  5086. bnx2_setup_int_mode(bp, disable_msi);
  5087. bnx2_init_napi(bp);
  5088. bnx2_napi_enable(bp);
  5089. rc = bnx2_alloc_mem(bp);
  5090. if (rc)
  5091. goto open_err;
  5092. rc = bnx2_request_irq(bp);
  5093. if (rc)
  5094. goto open_err;
  5095. rc = bnx2_init_nic(bp, 1);
  5096. if (rc)
  5097. goto open_err;
  5098. mod_timer(&bp->timer, jiffies + bp->current_interval);
  5099. atomic_set(&bp->intr_sem, 0);
  5100. memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
  5101. bnx2_enable_int(bp);
  5102. if (bp->flags & BNX2_FLAG_USING_MSI) {
/* Test MSI to make sure it is working.
 * If the MSI test fails, go back to INTx mode.
 */
  5106. if (bnx2_test_intr(bp) != 0) {
  5107. netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
  5108. bnx2_disable_int(bp);
  5109. bnx2_free_irq(bp);
  5110. bnx2_setup_int_mode(bp, 1);
  5111. rc = bnx2_init_nic(bp, 0);
  5112. if (!rc)
  5113. rc = bnx2_request_irq(bp);
  5114. if (rc) {
  5115. del_timer_sync(&bp->timer);
  5116. goto open_err;
  5117. }
  5118. bnx2_enable_int(bp);
  5119. }
  5120. }
  5121. if (bp->flags & BNX2_FLAG_USING_MSI)
  5122. netdev_info(dev, "using MSI\n");
  5123. else if (bp->flags & BNX2_FLAG_USING_MSIX)
  5124. netdev_info(dev, "using MSIX\n");
  5125. netif_tx_start_all_queues(dev);
  5126. return 0;
  5127. open_err:
  5128. bnx2_napi_disable(bp);
  5129. bnx2_free_skbs(bp);
  5130. bnx2_free_irq(bp);
  5131. bnx2_free_mem(bp);
  5132. bnx2_del_napi(bp);
  5133. return rc;
  5134. }
  5135. static void
  5136. bnx2_reset_task(struct work_struct *work)
  5137. {
  5138. struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
  5139. rtnl_lock();
  5140. if (!netif_running(bp->dev)) {
  5141. rtnl_unlock();
  5142. return;
  5143. }
  5144. bnx2_netif_stop(bp, true);
  5145. bnx2_init_nic(bp, 1);
  5146. atomic_set(&bp->intr_sem, 1);
  5147. bnx2_netif_start(bp, true);
  5148. rtnl_unlock();
  5149. }
  5150. static void
  5151. bnx2_dump_state(struct bnx2 *bp)
  5152. {
  5153. struct net_device *dev = bp->dev;
  5154. u32 mcp_p0, mcp_p1, val1, val2;
  5155. pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
  5156. netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
  5157. atomic_read(&bp->intr_sem), val1);
  5158. pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
  5159. pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
  5160. netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
  5161. netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
  5162. REG_RD(bp, BNX2_EMAC_TX_STATUS),
  5163. REG_RD(bp, BNX2_EMAC_RX_STATUS));
  5164. netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
  5165. REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
  5166. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  5167. mcp_p0 = BNX2_MCP_STATE_P0;
  5168. mcp_p1 = BNX2_MCP_STATE_P1;
  5169. } else {
  5170. mcp_p0 = BNX2_MCP_STATE_P0_5708;
  5171. mcp_p1 = BNX2_MCP_STATE_P1_5708;
  5172. }
  5173. netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
  5174. bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
  5175. netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
  5176. REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
  5177. if (bp->flags & BNX2_FLAG_USING_MSIX)
  5178. netdev_err(dev, "DEBUG: PBA[%08x]\n",
  5179. REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
  5180. }
  5181. static void
  5182. bnx2_tx_timeout(struct net_device *dev)
  5183. {
  5184. struct bnx2 *bp = netdev_priv(dev);
  5185. bnx2_dump_state(bp);
/* This allows the netif to be shut down gracefully before resetting */
  5187. schedule_work(&bp->reset_task);
  5188. }
  5189. #ifdef BCM_VLAN
  5190. /* Called with rtnl_lock */
  5191. static void
  5192. bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
  5193. {
  5194. struct bnx2 *bp = netdev_priv(dev);
  5195. if (netif_running(dev))
  5196. bnx2_netif_stop(bp, false);
  5197. bp->vlgrp = vlgrp;
  5198. if (!netif_running(dev))
  5199. return;
  5200. bnx2_set_rx_mode(dev);
  5201. if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
  5202. bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
  5203. bnx2_netif_start(bp, false);
  5204. }
  5205. #endif
  5206. /* Called with netif_tx_lock.
  5207. * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
  5208. * netif_wake_queue().
  5209. */
  5210. static netdev_tx_t
  5211. bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
  5212. {
  5213. struct bnx2 *bp = netdev_priv(dev);
  5214. dma_addr_t mapping;
  5215. struct tx_bd *txbd;
  5216. struct sw_tx_bd *tx_buf;
  5217. u32 len, vlan_tag_flags, last_frag, mss;
  5218. u16 prod, ring_prod;
  5219. int i;
  5220. struct bnx2_napi *bnapi;
  5221. struct bnx2_tx_ring_info *txr;
  5222. struct netdev_queue *txq;
/* Determine which tx ring this packet will be placed on */
  5224. i = skb_get_queue_mapping(skb);
  5225. bnapi = &bp->bnx2_napi[i];
  5226. txr = &bnapi->tx_ring;
  5227. txq = netdev_get_tx_queue(dev, i);
  5228. if (unlikely(bnx2_tx_avail(bp, txr) <
  5229. (skb_shinfo(skb)->nr_frags + 1))) {
  5230. netif_tx_stop_queue(txq);
  5231. netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
  5232. return NETDEV_TX_BUSY;
  5233. }
  5234. len = skb_headlen(skb);
  5235. prod = txr->tx_prod;
  5236. ring_prod = TX_RING_IDX(prod);
  5237. vlan_tag_flags = 0;
  5238. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  5239. vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
  5240. }
  5241. #ifdef BCM_VLAN
  5242. if (bp->vlgrp && vlan_tx_tag_present(skb)) {
  5243. vlan_tag_flags |=
  5244. (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
  5245. }
  5246. #endif
  5247. if ((mss = skb_shinfo(skb)->gso_size)) {
  5248. u32 tcp_opt_len;
  5249. struct iphdr *iph;
  5250. vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
  5251. tcp_opt_len = tcp_optlen(skb);
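/* For TSO over IPv6, any transport-header offset beyond the standard IPv6
 * header has to be encoded, in 8-byte units, across the BD flag and MSS
 * offset fields. */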
  5252. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
  5253. u32 tcp_off = skb_transport_offset(skb) -
  5254. sizeof(struct ipv6hdr) - ETH_HLEN;
  5255. vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
  5256. TX_BD_FLAGS_SW_FLAGS;
  5257. if (likely(tcp_off == 0))
  5258. vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
  5259. else {
  5260. tcp_off >>= 3;
  5261. vlan_tag_flags |= ((tcp_off & 0x3) <<
  5262. TX_BD_FLAGS_TCP6_OFF0_SHL) |
  5263. ((tcp_off & 0x10) <<
  5264. TX_BD_FLAGS_TCP6_OFF4_SHL);
  5265. mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
  5266. }
  5267. } else {
  5268. iph = ip_hdr(skb);
  5269. if (tcp_opt_len || (iph->ihl > 5)) {
  5270. vlan_tag_flags |= ((iph->ihl - 5) +
  5271. (tcp_opt_len >> 2)) << 8;
  5272. }
  5273. }
  5274. } else
  5275. mss = 0;
  5276. mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
  5277. if (dma_mapping_error(&bp->pdev->dev, mapping)) {
  5278. dev_kfree_skb(skb);
  5279. return NETDEV_TX_OK;
  5280. }
  5281. tx_buf = &txr->tx_buf_ring[ring_prod];
  5282. tx_buf->skb = skb;
  5283. dma_unmap_addr_set(tx_buf, mapping, mapping);
  5284. txbd = &txr->tx_desc_ring[ring_prod];
  5285. txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
  5286. txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
  5287. txbd->tx_bd_mss_nbytes = len | (mss << 16);
  5288. txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
  5289. last_frag = skb_shinfo(skb)->nr_frags;
  5290. tx_buf->nr_frags = last_frag;
  5291. tx_buf->is_gso = skb_is_gso(skb);
  5292. for (i = 0; i < last_frag; i++) {
  5293. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  5294. prod = NEXT_TX_BD(prod);
  5295. ring_prod = TX_RING_IDX(prod);
  5296. txbd = &txr->tx_desc_ring[ring_prod];
  5297. len = frag->size;
  5298. mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
  5299. len, PCI_DMA_TODEVICE);
  5300. if (dma_mapping_error(&bp->pdev->dev, mapping))
  5301. goto dma_error;
  5302. dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
  5303. mapping);
  5304. txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
  5305. txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
  5306. txbd->tx_bd_mss_nbytes = len | (mss << 16);
  5307. txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
  5308. }
  5309. txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
  5310. prod = NEXT_TX_BD(prod);
  5311. txr->tx_prod_bseq += skb->len;
  5312. REG_WR16(bp, txr->tx_bidx_addr, prod);
  5313. REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
  5314. mmiowb();
  5315. txr->tx_prod = prod;
  5316. if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
  5317. netif_tx_stop_queue(txq);
  5318. if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
  5319. netif_tx_wake_queue(txq);
  5320. }
  5321. return NETDEV_TX_OK;
  5322. dma_error:
  5323. /* save value of frag that failed */
  5324. last_frag = i;
  5325. /* start back at beginning and unmap skb */
  5326. prod = txr->tx_prod;
  5327. ring_prod = TX_RING_IDX(prod);
  5328. tx_buf = &txr->tx_buf_ring[ring_prod];
  5329. tx_buf->skb = NULL;
  5330. dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
  5331. skb_headlen(skb), PCI_DMA_TODEVICE);
  5332. /* unmap remaining mapped pages */
  5333. for (i = 0; i < last_frag; i++) {
  5334. prod = NEXT_TX_BD(prod);
  5335. ring_prod = TX_RING_IDX(prod);
  5336. tx_buf = &txr->tx_buf_ring[ring_prod];
  5337. dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
  5338. skb_shinfo(skb)->frags[i].size,
  5339. PCI_DMA_TODEVICE);
  5340. }
  5341. dev_kfree_skb(skb);
  5342. return NETDEV_TX_OK;
  5343. }
  5344. /* Called with rtnl_lock */
  5345. static int
  5346. bnx2_close(struct net_device *dev)
  5347. {
  5348. struct bnx2 *bp = netdev_priv(dev);
  5349. cancel_work_sync(&bp->reset_task);
  5350. bnx2_disable_int_sync(bp);
  5351. bnx2_napi_disable(bp);
  5352. del_timer_sync(&bp->timer);
  5353. bnx2_shutdown_chip(bp);
  5354. bnx2_free_irq(bp);
  5355. bnx2_free_skbs(bp);
  5356. bnx2_free_mem(bp);
  5357. bnx2_del_napi(bp);
  5358. bp->link_up = 0;
  5359. netif_carrier_off(bp->dev);
  5360. bnx2_set_power_state(bp, PCI_D3hot);
  5361. return 0;
  5362. }
  5363. static void
  5364. bnx2_save_stats(struct bnx2 *bp)
  5365. {
  5366. u32 *hw_stats = (u32 *) bp->stats_blk;
  5367. u32 *temp_stats = (u32 *) bp->temp_stats_blk;
  5368. int i;
  5369. /* The 1st 10 counters are 64-bit counters */
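/* Sum the high and low 32-bit halves separately and propagate a carry into
 * the high word when the low sum overflows 32 bits. */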
  5370. for (i = 0; i < 20; i += 2) {
  5371. u32 hi;
  5372. u64 lo;
  5373. hi = temp_stats[i] + hw_stats[i];
  5374. lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
  5375. if (lo > 0xffffffff)
  5376. hi++;
  5377. temp_stats[i] = hi;
  5378. temp_stats[i + 1] = lo & 0xffffffff;
  5379. }
  5380. for ( ; i < sizeof(struct statistics_block) / 4; i++)
  5381. temp_stats[i] += hw_stats[i];
  5382. }
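/* These helpers fold the live hardware statistics block together with any
 * totals previously accumulated in temp_stats_blk (see bnx2_save_stats above). */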
  5383. #define GET_64BIT_NET_STATS64(ctr) \
  5384. (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
  5385. #define GET_64BIT_NET_STATS(ctr) \
  5386. GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
  5387. GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
  5388. #define GET_32BIT_NET_STATS(ctr) \
  5389. (unsigned long) (bp->stats_blk->ctr + \
  5390. bp->temp_stats_blk->ctr)
  5391. static struct rtnl_link_stats64 *
  5392. bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
  5393. {
  5394. struct bnx2 *bp = netdev_priv(dev);
  5395. if (bp->stats_blk == NULL)
  5396. return net_stats;
  5397. net_stats->rx_packets =
  5398. GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
  5399. GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
  5400. GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
  5401. net_stats->tx_packets =
  5402. GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
  5403. GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
  5404. GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
  5405. net_stats->rx_bytes =
  5406. GET_64BIT_NET_STATS(stat_IfHCInOctets);
  5407. net_stats->tx_bytes =
  5408. GET_64BIT_NET_STATS(stat_IfHCOutOctets);
5409. net_stats->multicast =
5410. GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
  5411. net_stats->collisions =
  5412. GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
  5413. net_stats->rx_length_errors =
  5414. GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
  5415. GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
  5416. net_stats->rx_over_errors =
  5417. GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
  5418. GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
  5419. net_stats->rx_frame_errors =
  5420. GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
  5421. net_stats->rx_crc_errors =
  5422. GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
  5423. net_stats->rx_errors = net_stats->rx_length_errors +
  5424. net_stats->rx_over_errors + net_stats->rx_frame_errors +
  5425. net_stats->rx_crc_errors;
  5426. net_stats->tx_aborted_errors =
  5427. GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
  5428. GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
  5429. if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
  5430. (CHIP_ID(bp) == CHIP_ID_5708_A0))
  5431. net_stats->tx_carrier_errors = 0;
  5432. else {
  5433. net_stats->tx_carrier_errors =
  5434. GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
  5435. }
  5436. net_stats->tx_errors =
  5437. GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
  5438. net_stats->tx_aborted_errors +
  5439. net_stats->tx_carrier_errors;
  5440. net_stats->rx_missed_errors =
  5441. GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
  5442. GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
  5443. GET_32BIT_NET_STATS(stat_FwRxDrop);
  5444. return net_stats;
  5445. }
  5446. /* All ethtool functions called with rtnl_lock */
  5447. static int
  5448. bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  5449. {
  5450. struct bnx2 *bp = netdev_priv(dev);
  5451. int support_serdes = 0, support_copper = 0;
  5452. cmd->supported = SUPPORTED_Autoneg;
  5453. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
  5454. support_serdes = 1;
  5455. support_copper = 1;
  5456. } else if (bp->phy_port == PORT_FIBRE)
  5457. support_serdes = 1;
  5458. else
  5459. support_copper = 1;
  5460. if (support_serdes) {
  5461. cmd->supported |= SUPPORTED_1000baseT_Full |
  5462. SUPPORTED_FIBRE;
  5463. if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
  5464. cmd->supported |= SUPPORTED_2500baseX_Full;
  5465. }
  5466. if (support_copper) {
  5467. cmd->supported |= SUPPORTED_10baseT_Half |
  5468. SUPPORTED_10baseT_Full |
  5469. SUPPORTED_100baseT_Half |
  5470. SUPPORTED_100baseT_Full |
  5471. SUPPORTED_1000baseT_Full |
  5472. SUPPORTED_TP;
  5473. }
  5474. spin_lock_bh(&bp->phy_lock);
  5475. cmd->port = bp->phy_port;
  5476. cmd->advertising = bp->advertising;
  5477. if (bp->autoneg & AUTONEG_SPEED) {
  5478. cmd->autoneg = AUTONEG_ENABLE;
  5479. }
  5480. else {
  5481. cmd->autoneg = AUTONEG_DISABLE;
  5482. }
  5483. if (netif_carrier_ok(dev)) {
  5484. cmd->speed = bp->line_speed;
  5485. cmd->duplex = bp->duplex;
  5486. }
  5487. else {
  5488. cmd->speed = -1;
  5489. cmd->duplex = -1;
  5490. }
  5491. spin_unlock_bh(&bp->phy_lock);
  5492. cmd->transceiver = XCVR_INTERNAL;
  5493. cmd->phy_address = bp->phy_addr;
  5494. return 0;
  5495. }
  5496. static int
  5497. bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  5498. {
  5499. struct bnx2 *bp = netdev_priv(dev);
  5500. u8 autoneg = bp->autoneg;
  5501. u8 req_duplex = bp->req_duplex;
  5502. u16 req_line_speed = bp->req_line_speed;
  5503. u32 advertising = bp->advertising;
  5504. int err = -EINVAL;
  5505. spin_lock_bh(&bp->phy_lock);
  5506. if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
  5507. goto err_out_unlock;
  5508. if (cmd->port != bp->phy_port &&
  5509. !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
  5510. goto err_out_unlock;
  5511. /* If device is down, we can store the settings only if the user
  5512. * is setting the currently active port.
  5513. */
  5514. if (!netif_running(dev) && cmd->port != bp->phy_port)
  5515. goto err_out_unlock;
  5516. if (cmd->autoneg == AUTONEG_ENABLE) {
  5517. autoneg |= AUTONEG_SPEED;
  5518. advertising = cmd->advertising;
  5519. if (cmd->port == PORT_TP) {
  5520. advertising &= ETHTOOL_ALL_COPPER_SPEED;
  5521. if (!advertising)
  5522. advertising = ETHTOOL_ALL_COPPER_SPEED;
  5523. } else {
  5524. advertising &= ETHTOOL_ALL_FIBRE_SPEED;
  5525. if (!advertising)
  5526. advertising = ETHTOOL_ALL_FIBRE_SPEED;
  5527. }
  5528. advertising |= ADVERTISED_Autoneg;
  5529. }
  5530. else {
  5531. if (cmd->port == PORT_FIBRE) {
  5532. if ((cmd->speed != SPEED_1000 &&
  5533. cmd->speed != SPEED_2500) ||
  5534. (cmd->duplex != DUPLEX_FULL))
  5535. goto err_out_unlock;
  5536. if (cmd->speed == SPEED_2500 &&
  5537. !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  5538. goto err_out_unlock;
  5539. }
  5540. else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
  5541. goto err_out_unlock;
  5542. autoneg &= ~AUTONEG_SPEED;
  5543. req_line_speed = cmd->speed;
  5544. req_duplex = cmd->duplex;
  5545. advertising = 0;
  5546. }
  5547. bp->autoneg = autoneg;
  5548. bp->advertising = advertising;
  5549. bp->req_line_speed = req_line_speed;
  5550. bp->req_duplex = req_duplex;
  5551. err = 0;
  5552. /* If device is down, the new settings will be picked up when it is
  5553. * brought up.
  5554. */
  5555. if (netif_running(dev))
  5556. err = bnx2_setup_phy(bp, cmd->port);
  5557. err_out_unlock:
  5558. spin_unlock_bh(&bp->phy_lock);
  5559. return err;
  5560. }
  5561. static void
  5562. bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  5563. {
  5564. struct bnx2 *bp = netdev_priv(dev);
  5565. strcpy(info->driver, DRV_MODULE_NAME);
  5566. strcpy(info->version, DRV_MODULE_VERSION);
  5567. strcpy(info->bus_info, pci_name(bp->pdev));
  5568. strcpy(info->fw_version, bp->fw_version);
  5569. }
  5570. #define BNX2_REGDUMP_LEN (32 * 1024)
  5571. static int
  5572. bnx2_get_regs_len(struct net_device *dev)
  5573. {
  5574. return BNX2_REGDUMP_LEN;
  5575. }
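/* The register dump is sparse: reg_boundaries[] lists start/end offset
 * pairs of readable register windows.  Offsets between windows are not
 * read from the chip and remain zero in the 32 KB output buffer.
 */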
  5576. static void
  5577. bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
  5578. {
  5579. u32 *p = _p, i, offset;
  5580. u8 *orig_p = _p;
  5581. struct bnx2 *bp = netdev_priv(dev);
  5582. u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
  5583. 0x0800, 0x0880, 0x0c00, 0x0c10,
  5584. 0x0c30, 0x0d08, 0x1000, 0x101c,
  5585. 0x1040, 0x1048, 0x1080, 0x10a4,
  5586. 0x1400, 0x1490, 0x1498, 0x14f0,
  5587. 0x1500, 0x155c, 0x1580, 0x15dc,
  5588. 0x1600, 0x1658, 0x1680, 0x16d8,
  5589. 0x1800, 0x1820, 0x1840, 0x1854,
  5590. 0x1880, 0x1894, 0x1900, 0x1984,
  5591. 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
  5592. 0x1c80, 0x1c94, 0x1d00, 0x1d84,
  5593. 0x2000, 0x2030, 0x23c0, 0x2400,
  5594. 0x2800, 0x2820, 0x2830, 0x2850,
  5595. 0x2b40, 0x2c10, 0x2fc0, 0x3058,
  5596. 0x3c00, 0x3c94, 0x4000, 0x4010,
  5597. 0x4080, 0x4090, 0x43c0, 0x4458,
  5598. 0x4c00, 0x4c18, 0x4c40, 0x4c54,
  5599. 0x4fc0, 0x5010, 0x53c0, 0x5444,
  5600. 0x5c00, 0x5c18, 0x5c80, 0x5c90,
  5601. 0x5fc0, 0x6000, 0x6400, 0x6428,
  5602. 0x6800, 0x6848, 0x684c, 0x6860,
  5603. 0x6888, 0x6910, 0x8000 };
  5604. regs->version = 0;
  5605. memset(p, 0, BNX2_REGDUMP_LEN);
  5606. if (!netif_running(bp->dev))
  5607. return;
  5608. i = 0;
  5609. offset = reg_boundaries[0];
  5610. p += offset;
  5611. while (offset < BNX2_REGDUMP_LEN) {
  5612. *p++ = REG_RD(bp, offset);
  5613. offset += 4;
  5614. if (offset == reg_boundaries[i + 1]) {
  5615. offset = reg_boundaries[i + 2];
  5616. p = (u32 *) (orig_p + offset);
  5617. i += 2;
  5618. }
  5619. }
  5620. }
  5621. static void
  5622. bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  5623. {
  5624. struct bnx2 *bp = netdev_priv(dev);
  5625. if (bp->flags & BNX2_FLAG_NO_WOL) {
  5626. wol->supported = 0;
  5627. wol->wolopts = 0;
  5628. }
  5629. else {
  5630. wol->supported = WAKE_MAGIC;
  5631. if (bp->wol)
  5632. wol->wolopts = WAKE_MAGIC;
  5633. else
  5634. wol->wolopts = 0;
  5635. }
  5636. memset(&wol->sopass, 0, sizeof(wol->sopass));
  5637. }
  5638. static int
  5639. bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  5640. {
  5641. struct bnx2 *bp = netdev_priv(dev);
  5642. if (wol->wolopts & ~WAKE_MAGIC)
  5643. return -EINVAL;
  5644. if (wol->wolopts & WAKE_MAGIC) {
  5645. if (bp->flags & BNX2_FLAG_NO_WOL)
  5646. return -EINVAL;
  5647. bp->wol = 1;
  5648. }
  5649. else {
  5650. bp->wol = 0;
  5651. }
  5652. return 0;
  5653. }
  5654. static int
  5655. bnx2_nway_reset(struct net_device *dev)
  5656. {
  5657. struct bnx2 *bp = netdev_priv(dev);
  5658. u32 bmcr;
  5659. if (!netif_running(dev))
  5660. return -EAGAIN;
  5661. if (!(bp->autoneg & AUTONEG_SPEED)) {
  5662. return -EINVAL;
  5663. }
  5664. spin_lock_bh(&bp->phy_lock);
  5665. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
  5666. int rc;
  5667. rc = bnx2_setup_remote_phy(bp, bp->phy_port);
  5668. spin_unlock_bh(&bp->phy_lock);
  5669. return rc;
  5670. }
  5671. /* Force a link down visible on the other side */
  5672. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  5673. bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
  5674. spin_unlock_bh(&bp->phy_lock);
  5675. msleep(20);
  5676. spin_lock_bh(&bp->phy_lock);
  5677. bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
  5678. bp->serdes_an_pending = 1;
  5679. mod_timer(&bp->timer, jiffies + bp->current_interval);
  5680. }
  5681. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  5682. bmcr &= ~BMCR_LOOPBACK;
  5683. bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
  5684. spin_unlock_bh(&bp->phy_lock);
  5685. return 0;
  5686. }
  5687. static u32
  5688. bnx2_get_link(struct net_device *dev)
  5689. {
  5690. struct bnx2 *bp = netdev_priv(dev);
  5691. return bp->link_up;
  5692. }
  5693. static int
  5694. bnx2_get_eeprom_len(struct net_device *dev)
  5695. {
  5696. struct bnx2 *bp = netdev_priv(dev);
  5697. if (bp->flash_info == NULL)
  5698. return 0;
  5699. return (int) bp->flash_size;
  5700. }
  5701. static int
  5702. bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
  5703. u8 *eebuf)
  5704. {
  5705. struct bnx2 *bp = netdev_priv(dev);
  5706. int rc;
  5707. if (!netif_running(dev))
  5708. return -EAGAIN;
  5709. /* parameters already validated in ethtool_get_eeprom */
  5710. rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
  5711. return rc;
  5712. }
  5713. static int
  5714. bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
  5715. u8 *eebuf)
  5716. {
  5717. struct bnx2 *bp = netdev_priv(dev);
  5718. int rc;
  5719. if (!netif_running(dev))
  5720. return -EAGAIN;
  5721. /* parameters already validated in ethtool_set_eeprom */
  5722. rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
  5723. return rc;
  5724. }
  5725. static int
  5726. bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
  5727. {
  5728. struct bnx2 *bp = netdev_priv(dev);
  5729. memset(coal, 0, sizeof(struct ethtool_coalesce));
  5730. coal->rx_coalesce_usecs = bp->rx_ticks;
  5731. coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
  5732. coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
  5733. coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
  5734. coal->tx_coalesce_usecs = bp->tx_ticks;
  5735. coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
  5736. coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
  5737. coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
  5738. coal->stats_block_coalesce_usecs = bp->stats_ticks;
  5739. return 0;
  5740. }
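/* New coalescing values are clamped to the host-coalescing register
 * field limits (0x3ff for tick values, 0xff for frame counts).  Chips
 * with BNX2_FLAG_BROKEN_STATS only support a statistics timer of 0 or
 * one second.  If the interface is up, the NIC is stopped and
 * re-initialized so the new values take effect.
 */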
  5741. static int
  5742. bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
  5743. {
  5744. struct bnx2 *bp = netdev_priv(dev);
  5745. bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
  5746. if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
  5747. bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
  5748. if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
  5749. bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
  5750. if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
  5751. bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
  5752. if (bp->rx_quick_cons_trip_int > 0xff)
  5753. bp->rx_quick_cons_trip_int = 0xff;
  5754. bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
  5755. if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
  5756. bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
  5757. if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
  5758. bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
  5759. if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
  5760. bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
  5761. if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
  5762. 0xff;
  5763. bp->stats_ticks = coal->stats_block_coalesce_usecs;
  5764. if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
  5765. if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
  5766. bp->stats_ticks = USEC_PER_SEC;
  5767. }
  5768. if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
  5769. bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
  5770. bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
  5771. if (netif_running(bp->dev)) {
  5772. bnx2_netif_stop(bp, true);
  5773. bnx2_init_nic(bp, 0);
  5774. bnx2_netif_start(bp, true);
  5775. }
  5776. return 0;
  5777. }
  5778. static void
  5779. bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
  5780. {
  5781. struct bnx2 *bp = netdev_priv(dev);
  5782. ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
  5783. ering->rx_mini_max_pending = 0;
  5784. ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
  5785. ering->rx_pending = bp->rx_ring_size;
  5786. ering->rx_mini_pending = 0;
  5787. ering->rx_jumbo_pending = bp->rx_pg_ring_size;
  5788. ering->tx_max_pending = MAX_TX_DESC_CNT;
  5789. ering->tx_pending = bp->tx_ring_size;
  5790. }
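/* Changing the ring sizes requires a full reset: save the hardware
 * statistics (the reset clears them), stop the NIC, free all buffers
 * and DMA memory, then reallocate and re-initialize with the new
 * sizes.  If reallocation fails, the device is closed rather than left
 * half-configured.
 */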
  5791. static int
  5792. bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
  5793. {
  5794. if (netif_running(bp->dev)) {
  5795. /* Reset will erase chipset stats; save them */
  5796. bnx2_save_stats(bp);
  5797. bnx2_netif_stop(bp, true);
  5798. bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
  5799. bnx2_free_skbs(bp);
  5800. bnx2_free_mem(bp);
  5801. }
  5802. bnx2_set_rx_ring_size(bp, rx);
  5803. bp->tx_ring_size = tx;
  5804. if (netif_running(bp->dev)) {
  5805. int rc;
  5806. rc = bnx2_alloc_mem(bp);
  5807. if (!rc)
  5808. rc = bnx2_init_nic(bp, 0);
  5809. if (rc) {
  5810. bnx2_napi_enable(bp);
  5811. dev_close(bp->dev);
  5812. return rc;
  5813. }
  5814. #ifdef BCM_CNIC
  5815. mutex_lock(&bp->cnic_lock);
  5816. /* Let cnic know about the new status block. */
  5817. if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
  5818. bnx2_setup_cnic_irq_info(bp);
  5819. mutex_unlock(&bp->cnic_lock);
  5820. #endif
  5821. bnx2_netif_start(bp, true);
  5822. }
  5823. return 0;
  5824. }
  5825. static int
  5826. bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
  5827. {
  5828. struct bnx2 *bp = netdev_priv(dev);
  5829. int rc;
  5830. if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
  5831. (ering->tx_pending > MAX_TX_DESC_CNT) ||
  5832. (ering->tx_pending <= MAX_SKB_FRAGS)) {
  5833. return -EINVAL;
  5834. }
  5835. rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
  5836. return rc;
  5837. }
  5838. static void
  5839. bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
  5840. {
  5841. struct bnx2 *bp = netdev_priv(dev);
  5842. epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
  5843. epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
  5844. epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
  5845. }
  5846. static int
  5847. bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
  5848. {
  5849. struct bnx2 *bp = netdev_priv(dev);
  5850. bp->req_flow_ctrl = 0;
  5851. if (epause->rx_pause)
  5852. bp->req_flow_ctrl |= FLOW_CTRL_RX;
  5853. if (epause->tx_pause)
  5854. bp->req_flow_ctrl |= FLOW_CTRL_TX;
  5855. if (epause->autoneg) {
  5856. bp->autoneg |= AUTONEG_FLOW_CTRL;
  5857. }
  5858. else {
  5859. bp->autoneg &= ~AUTONEG_FLOW_CTRL;
  5860. }
  5861. if (netif_running(dev)) {
  5862. spin_lock_bh(&bp->phy_lock);
  5863. bnx2_setup_phy(bp, bp->phy_port);
  5864. spin_unlock_bh(&bp->phy_lock);
  5865. }
  5866. return 0;
  5867. }
  5868. static u32
  5869. bnx2_get_rx_csum(struct net_device *dev)
  5870. {
  5871. struct bnx2 *bp = netdev_priv(dev);
  5872. return bp->rx_csum;
  5873. }
  5874. static int
  5875. bnx2_set_rx_csum(struct net_device *dev, u32 data)
  5876. {
  5877. struct bnx2 *bp = netdev_priv(dev);
  5878. bp->rx_csum = data;
  5879. return 0;
  5880. }
  5881. static int
  5882. bnx2_set_tso(struct net_device *dev, u32 data)
  5883. {
  5884. struct bnx2 *bp = netdev_priv(dev);
  5885. if (data) {
  5886. dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
  5887. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  5888. dev->features |= NETIF_F_TSO6;
  5889. } else
  5890. dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
  5891. NETIF_F_TSO_ECN);
  5892. return 0;
  5893. }
  5894. static struct {
  5895. char string[ETH_GSTRING_LEN];
  5896. } bnx2_stats_str_arr[] = {
  5897. { "rx_bytes" },
  5898. { "rx_error_bytes" },
  5899. { "tx_bytes" },
  5900. { "tx_error_bytes" },
  5901. { "rx_ucast_packets" },
  5902. { "rx_mcast_packets" },
  5903. { "rx_bcast_packets" },
  5904. { "tx_ucast_packets" },
  5905. { "tx_mcast_packets" },
  5906. { "tx_bcast_packets" },
  5907. { "tx_mac_errors" },
  5908. { "tx_carrier_errors" },
  5909. { "rx_crc_errors" },
  5910. { "rx_align_errors" },
  5911. { "tx_single_collisions" },
  5912. { "tx_multi_collisions" },
  5913. { "tx_deferred" },
  5914. { "tx_excess_collisions" },
  5915. { "tx_late_collisions" },
  5916. { "tx_total_collisions" },
  5917. { "rx_fragments" },
  5918. { "rx_jabbers" },
  5919. { "rx_undersize_packets" },
  5920. { "rx_oversize_packets" },
  5921. { "rx_64_byte_packets" },
  5922. { "rx_65_to_127_byte_packets" },
  5923. { "rx_128_to_255_byte_packets" },
  5924. { "rx_256_to_511_byte_packets" },
  5925. { "rx_512_to_1023_byte_packets" },
  5926. { "rx_1024_to_1522_byte_packets" },
  5927. { "rx_1523_to_9022_byte_packets" },
  5928. { "tx_64_byte_packets" },
  5929. { "tx_65_to_127_byte_packets" },
  5930. { "tx_128_to_255_byte_packets" },
  5931. { "tx_256_to_511_byte_packets" },
  5932. { "tx_512_to_1023_byte_packets" },
  5933. { "tx_1024_to_1522_byte_packets" },
  5934. { "tx_1523_to_9022_byte_packets" },
  5935. { "rx_xon_frames" },
  5936. { "rx_xoff_frames" },
  5937. { "tx_xon_frames" },
  5938. { "tx_xoff_frames" },
  5939. { "rx_mac_ctrl_frames" },
  5940. { "rx_filtered_packets" },
  5941. { "rx_ftq_discards" },
  5942. { "rx_discards" },
  5943. { "rx_fw_discards" },
  5944. };
  5945. #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
  5946. sizeof(bnx2_stats_str_arr[0]))
  5947. #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
  5948. static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
  5949. STATS_OFFSET32(stat_IfHCInOctets_hi),
  5950. STATS_OFFSET32(stat_IfHCInBadOctets_hi),
  5951. STATS_OFFSET32(stat_IfHCOutOctets_hi),
  5952. STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
  5953. STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
  5954. STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
  5955. STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
  5956. STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
  5957. STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
  5958. STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
  5959. STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
  5960. STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
  5961. STATS_OFFSET32(stat_Dot3StatsFCSErrors),
  5962. STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
  5963. STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
  5964. STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
  5965. STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
  5966. STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
  5967. STATS_OFFSET32(stat_Dot3StatsLateCollisions),
  5968. STATS_OFFSET32(stat_EtherStatsCollisions),
  5969. STATS_OFFSET32(stat_EtherStatsFragments),
  5970. STATS_OFFSET32(stat_EtherStatsJabbers),
  5971. STATS_OFFSET32(stat_EtherStatsUndersizePkts),
  5972. STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
  5973. STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
  5974. STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
  5975. STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
  5976. STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
  5977. STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
  5978. STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
  5979. STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
  5980. STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
  5981. STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
  5982. STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
  5983. STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
  5984. STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
  5985. STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
  5986. STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
  5987. STATS_OFFSET32(stat_XonPauseFramesReceived),
  5988. STATS_OFFSET32(stat_XoffPauseFramesReceived),
  5989. STATS_OFFSET32(stat_OutXonSent),
  5990. STATS_OFFSET32(stat_OutXoffSent),
  5991. STATS_OFFSET32(stat_MacControlFramesReceived),
  5992. STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
  5993. STATS_OFFSET32(stat_IfInFTQDiscards),
  5994. STATS_OFFSET32(stat_IfInMBUFDiscards),
  5995. STATS_OFFSET32(stat_FwRxDrop),
  5996. };
  5997. /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
  5998. * skipped because of errata.
  5999. */
  6000. static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
  6001. 8,0,8,8,8,8,8,8,8,8,
  6002. 4,0,4,4,4,4,4,4,4,4,
  6003. 4,4,4,4,4,4,4,4,4,4,
  6004. 4,4,4,4,4,4,4,4,4,4,
  6005. 4,4,4,4,4,4,4,
  6006. };
  6007. static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
  6008. 8,0,8,8,8,8,8,8,8,8,
  6009. 4,4,4,4,4,4,4,4,4,4,
  6010. 4,4,4,4,4,4,4,4,4,4,
  6011. 4,4,4,4,4,4,4,4,4,4,
  6012. 4,4,4,4,4,4,4,
  6013. };
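/* The stats_len arrays above drive bnx2_get_ethtool_stats(): an entry
 * of 8 marks a 64-bit counter stored as a hi/lo pair, 4 marks a plain
 * 32-bit counter, and 0 marks a counter that is skipped (reported as
 * zero) because of the errata noted above.
 */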
  6014. #define BNX2_NUM_TESTS 6
  6015. static struct {
  6016. char string[ETH_GSTRING_LEN];
  6017. } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
  6018. { "register_test (offline)" },
  6019. { "memory_test (offline)" },
  6020. { "loopback_test (offline)" },
  6021. { "nvram_test (online)" },
  6022. { "interrupt_test (online)" },
  6023. { "link_test (online)" },
  6024. };
  6025. static int
  6026. bnx2_get_sset_count(struct net_device *dev, int sset)
  6027. {
  6028. switch (sset) {
  6029. case ETH_SS_TEST:
  6030. return BNX2_NUM_TESTS;
  6031. case ETH_SS_STATS:
  6032. return BNX2_NUM_STATS;
  6033. default:
  6034. return -EOPNOTSUPP;
  6035. }
  6036. }
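/* Self test: the offline tests (registers, memory, loopback) reset the
 * chip into diagnostic mode, so the NIC is re-initialized afterwards
 * (or shut down if the interface is not up) and we wait up to 7
 * seconds for the link to return.  The nvram, interrupt and link tests
 * run online.  buf[] indices correspond to bnx2_tests_str_arr[].
 */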
  6037. static void
  6038. bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
  6039. {
  6040. struct bnx2 *bp = netdev_priv(dev);
  6041. bnx2_set_power_state(bp, PCI_D0);
  6042. memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
  6043. if (etest->flags & ETH_TEST_FL_OFFLINE) {
  6044. int i;
  6045. bnx2_netif_stop(bp, true);
  6046. bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
  6047. bnx2_free_skbs(bp);
  6048. if (bnx2_test_registers(bp) != 0) {
  6049. buf[0] = 1;
  6050. etest->flags |= ETH_TEST_FL_FAILED;
  6051. }
  6052. if (bnx2_test_memory(bp) != 0) {
  6053. buf[1] = 1;
  6054. etest->flags |= ETH_TEST_FL_FAILED;
  6055. }
  6056. if ((buf[2] = bnx2_test_loopback(bp)) != 0)
  6057. etest->flags |= ETH_TEST_FL_FAILED;
  6058. if (!netif_running(bp->dev))
  6059. bnx2_shutdown_chip(bp);
  6060. else {
  6061. bnx2_init_nic(bp, 1);
  6062. bnx2_netif_start(bp, true);
  6063. }
  6064. /* wait for link up */
  6065. for (i = 0; i < 7; i++) {
  6066. if (bp->link_up)
  6067. break;
  6068. msleep_interruptible(1000);
  6069. }
  6070. }
  6071. if (bnx2_test_nvram(bp) != 0) {
  6072. buf[3] = 1;
  6073. etest->flags |= ETH_TEST_FL_FAILED;
  6074. }
  6075. if (bnx2_test_intr(bp) != 0) {
  6076. buf[4] = 1;
  6077. etest->flags |= ETH_TEST_FL_FAILED;
  6078. }
  6079. if (bnx2_test_link(bp) != 0) {
  6080. buf[5] = 1;
  6081. etest->flags |= ETH_TEST_FL_FAILED;
  6082. }
  6083. if (!netif_running(bp->dev))
  6084. bnx2_set_power_state(bp, PCI_D3hot);
  6085. }
  6086. static void
  6087. bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
  6088. {
  6089. switch (stringset) {
  6090. case ETH_SS_STATS:
  6091. memcpy(buf, bnx2_stats_str_arr,
  6092. sizeof(bnx2_stats_str_arr));
  6093. break;
  6094. case ETH_SS_TEST:
  6095. memcpy(buf, bnx2_tests_str_arr,
  6096. sizeof(bnx2_tests_str_arr));
  6097. break;
  6098. }
  6099. }
  6100. static void
  6101. bnx2_get_ethtool_stats(struct net_device *dev,
  6102. struct ethtool_stats *stats, u64 *buf)
  6103. {
  6104. struct bnx2 *bp = netdev_priv(dev);
  6105. int i;
  6106. u32 *hw_stats = (u32 *) bp->stats_blk;
  6107. u32 *temp_stats = (u32 *) bp->temp_stats_blk;
  6108. u8 *stats_len_arr = NULL;
  6109. if (hw_stats == NULL) {
  6110. memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
  6111. return;
  6112. }
  6113. if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
  6114. (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
  6115. (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
  6116. (CHIP_ID(bp) == CHIP_ID_5708_A0))
  6117. stats_len_arr = bnx2_5706_stats_len_arr;
  6118. else
  6119. stats_len_arr = bnx2_5708_stats_len_arr;
  6120. for (i = 0; i < BNX2_NUM_STATS; i++) {
  6121. unsigned long offset;
  6122. if (stats_len_arr[i] == 0) {
  6123. /* skip this counter */
  6124. buf[i] = 0;
  6125. continue;
  6126. }
  6127. offset = bnx2_stats_offset_arr[i];
  6128. if (stats_len_arr[i] == 4) {
  6129. /* 4-byte counter */
  6130. buf[i] = (u64) *(hw_stats + offset) +
  6131. *(temp_stats + offset);
  6132. continue;
  6133. }
  6134. /* 8-byte counter */
  6135. buf[i] = (((u64) *(hw_stats + offset)) << 32) +
  6136. *(hw_stats + offset + 1) +
  6137. (((u64) *(temp_stats + offset)) << 32) +
  6138. *(temp_stats + offset + 1);
  6139. }
  6140. }
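/* Identify the adapter by blinking its LEDs: toggle the EMAC LED
 * override bits every 500 ms for roughly 'data' seconds (default 2),
 * then clear the LED overrides and restore the saved BNX2_MISC_CFG
 * value.
 */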
  6141. static int
  6142. bnx2_phys_id(struct net_device *dev, u32 data)
  6143. {
  6144. struct bnx2 *bp = netdev_priv(dev);
  6145. int i;
  6146. u32 save;
  6147. bnx2_set_power_state(bp, PCI_D0);
  6148. if (data == 0)
  6149. data = 2;
  6150. save = REG_RD(bp, BNX2_MISC_CFG);
  6151. REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
  6152. for (i = 0; i < (data * 2); i++) {
  6153. if ((i % 2) == 0) {
  6154. REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
  6155. }
  6156. else {
  6157. REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
  6158. BNX2_EMAC_LED_1000MB_OVERRIDE |
  6159. BNX2_EMAC_LED_100MB_OVERRIDE |
  6160. BNX2_EMAC_LED_10MB_OVERRIDE |
  6161. BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
  6162. BNX2_EMAC_LED_TRAFFIC);
  6163. }
  6164. msleep_interruptible(500);
  6165. if (signal_pending(current))
  6166. break;
  6167. }
  6168. REG_WR(bp, BNX2_EMAC_LED, 0);
  6169. REG_WR(bp, BNX2_MISC_CFG, save);
  6170. if (!netif_running(dev))
  6171. bnx2_set_power_state(bp, PCI_D3hot);
  6172. return 0;
  6173. }
  6174. static int
  6175. bnx2_set_tx_csum(struct net_device *dev, u32 data)
  6176. {
  6177. struct bnx2 *bp = netdev_priv(dev);
  6178. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  6179. return (ethtool_op_set_tx_ipv6_csum(dev, data));
  6180. else
  6181. return (ethtool_op_set_tx_csum(dev, data));
  6182. }
  6183. static int
  6184. bnx2_set_flags(struct net_device *dev, u32 data)
  6185. {
  6186. return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
  6187. }
  6188. static const struct ethtool_ops bnx2_ethtool_ops = {
  6189. .get_settings = bnx2_get_settings,
  6190. .set_settings = bnx2_set_settings,
  6191. .get_drvinfo = bnx2_get_drvinfo,
  6192. .get_regs_len = bnx2_get_regs_len,
  6193. .get_regs = bnx2_get_regs,
  6194. .get_wol = bnx2_get_wol,
  6195. .set_wol = bnx2_set_wol,
  6196. .nway_reset = bnx2_nway_reset,
  6197. .get_link = bnx2_get_link,
  6198. .get_eeprom_len = bnx2_get_eeprom_len,
  6199. .get_eeprom = bnx2_get_eeprom,
  6200. .set_eeprom = bnx2_set_eeprom,
  6201. .get_coalesce = bnx2_get_coalesce,
  6202. .set_coalesce = bnx2_set_coalesce,
  6203. .get_ringparam = bnx2_get_ringparam,
  6204. .set_ringparam = bnx2_set_ringparam,
  6205. .get_pauseparam = bnx2_get_pauseparam,
  6206. .set_pauseparam = bnx2_set_pauseparam,
  6207. .get_rx_csum = bnx2_get_rx_csum,
  6208. .set_rx_csum = bnx2_set_rx_csum,
  6209. .set_tx_csum = bnx2_set_tx_csum,
  6210. .set_sg = ethtool_op_set_sg,
  6211. .set_tso = bnx2_set_tso,
  6212. .self_test = bnx2_self_test,
  6213. .get_strings = bnx2_get_strings,
  6214. .phys_id = bnx2_phys_id,
  6215. .get_ethtool_stats = bnx2_get_ethtool_stats,
  6216. .get_sset_count = bnx2_get_sset_count,
  6217. .set_flags = bnx2_set_flags,
  6218. .get_flags = ethtool_op_get_flags,
  6219. };
  6220. /* Called with rtnl_lock */
  6221. static int
  6222. bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  6223. {
  6224. struct mii_ioctl_data *data = if_mii(ifr);
  6225. struct bnx2 *bp = netdev_priv(dev);
  6226. int err;
  6227. switch(cmd) {
  6228. case SIOCGMIIPHY:
  6229. data->phy_id = bp->phy_addr;
  6230. /* fallthru */
  6231. case SIOCGMIIREG: {
  6232. u32 mii_regval;
  6233. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  6234. return -EOPNOTSUPP;
  6235. if (!netif_running(dev))
  6236. return -EAGAIN;
  6237. spin_lock_bh(&bp->phy_lock);
  6238. err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
  6239. spin_unlock_bh(&bp->phy_lock);
  6240. data->val_out = mii_regval;
  6241. return err;
  6242. }
  6243. case SIOCSMIIREG:
  6244. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  6245. return -EOPNOTSUPP;
  6246. if (!netif_running(dev))
  6247. return -EAGAIN;
  6248. spin_lock_bh(&bp->phy_lock);
  6249. err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
  6250. spin_unlock_bh(&bp->phy_lock);
  6251. return err;
  6252. default:
  6253. /* do nothing */
  6254. break;
  6255. }
  6256. return -EOPNOTSUPP;
  6257. }
  6258. /* Called with rtnl_lock */
  6259. static int
  6260. bnx2_change_mac_addr(struct net_device *dev, void *p)
  6261. {
  6262. struct sockaddr *addr = p;
  6263. struct bnx2 *bp = netdev_priv(dev);
  6264. if (!is_valid_ether_addr(addr->sa_data))
  6265. return -EINVAL;
  6266. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  6267. if (netif_running(dev))
  6268. bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
  6269. return 0;
  6270. }
  6271. /* Called with rtnl_lock */
  6272. static int
  6273. bnx2_change_mtu(struct net_device *dev, int new_mtu)
  6274. {
  6275. struct bnx2 *bp = netdev_priv(dev);
  6276. if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
  6277. ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
  6278. return -EINVAL;
  6279. dev->mtu = new_mtu;
  6280. return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
  6281. }
  6282. #ifdef CONFIG_NET_POLL_CONTROLLER
  6283. static void
  6284. poll_bnx2(struct net_device *dev)
  6285. {
  6286. struct bnx2 *bp = netdev_priv(dev);
  6287. int i;
  6288. for (i = 0; i < bp->irq_nvecs; i++) {
  6289. struct bnx2_irq *irq = &bp->irq_tbl[i];
  6290. disable_irq(irq->vector);
  6291. irq->handler(irq->vector, &bp->bnx2_napi[i]);
  6292. enable_irq(irq->vector);
  6293. }
  6294. }
  6295. #endif
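/* On the 5709, the media type (copper vs. SerDes) is derived from the
 * dual-media bond id or, if a strap override is set, from the PHY
 * strap value in BNX2_MISC_DUAL_MEDIA_CTRL.  The strap is interpreted
 * differently for PCI function 0 and the other functions.
 */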
  6296. static void __devinit
  6297. bnx2_get_5709_media(struct bnx2 *bp)
  6298. {
  6299. u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
  6300. u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
  6301. u32 strap;
  6302. if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
  6303. return;
  6304. else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
  6305. bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
  6306. return;
  6307. }
  6308. if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
  6309. strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
  6310. else
  6311. strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
  6312. if (PCI_FUNC(bp->pdev->devfn) == 0) {
  6313. switch (strap) {
  6314. case 0x4:
  6315. case 0x5:
  6316. case 0x6:
  6317. bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
  6318. return;
  6319. }
  6320. } else {
  6321. switch (strap) {
  6322. case 0x1:
  6323. case 0x2:
  6324. case 0x4:
  6325. bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
  6326. return;
  6327. }
  6328. }
  6329. }
  6330. static void __devinit
  6331. bnx2_get_pci_speed(struct bnx2 *bp)
  6332. {
  6333. u32 reg;
  6334. reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
  6335. if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
  6336. u32 clkreg;
  6337. bp->flags |= BNX2_FLAG_PCIX;
  6338. clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
  6339. clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
  6340. switch (clkreg) {
  6341. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
  6342. bp->bus_speed_mhz = 133;
  6343. break;
  6344. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
  6345. bp->bus_speed_mhz = 100;
  6346. break;
  6347. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
  6348. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
  6349. bp->bus_speed_mhz = 66;
  6350. break;
  6351. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
  6352. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
  6353. bp->bus_speed_mhz = 50;
  6354. break;
  6355. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
  6356. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
  6357. case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
  6358. bp->bus_speed_mhz = 33;
  6359. break;
  6360. }
  6361. }
  6362. else {
  6363. if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
  6364. bp->bus_speed_mhz = 66;
  6365. else
  6366. bp->bus_speed_mhz = 33;
  6367. }
  6368. if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
  6369. bp->flags |= BNX2_FLAG_PCI_32BIT;
  6370. }
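/* Read the 128-byte VPD image from NVRAM at offset 0x300, byte-swap
 * each 32-bit word into host order, and locate the read-only VPD
 * section.  The vendor version string (PCI_VPD_RO_KEYWORD_VENDOR0) is
 * copied into bp->fw_version only when the manufacturer id keyword
 * equals "1028".
 */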
  6371. static void __devinit
  6372. bnx2_read_vpd_fw_ver(struct bnx2 *bp)
  6373. {
  6374. int rc, i, j;
  6375. u8 *data;
  6376. unsigned int block_end, rosize, len;
  6377. #define BNX2_VPD_NVRAM_OFFSET 0x300
  6378. #define BNX2_VPD_LEN 128
  6379. #define BNX2_MAX_VER_SLEN 30
  6380. data = kmalloc(256, GFP_KERNEL);
  6381. if (!data)
  6382. return;
  6383. rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
  6384. BNX2_VPD_LEN);
  6385. if (rc)
  6386. goto vpd_done;
  6387. for (i = 0; i < BNX2_VPD_LEN; i += 4) {
  6388. data[i] = data[i + BNX2_VPD_LEN + 3];
  6389. data[i + 1] = data[i + BNX2_VPD_LEN + 2];
  6390. data[i + 2] = data[i + BNX2_VPD_LEN + 1];
  6391. data[i + 3] = data[i + BNX2_VPD_LEN];
  6392. }
  6393. i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
  6394. if (i < 0)
  6395. goto vpd_done;
  6396. rosize = pci_vpd_lrdt_size(&data[i]);
  6397. i += PCI_VPD_LRDT_TAG_SIZE;
  6398. block_end = i + rosize;
  6399. if (block_end > BNX2_VPD_LEN)
  6400. goto vpd_done;
  6401. j = pci_vpd_find_info_keyword(data, i, rosize,
  6402. PCI_VPD_RO_KEYWORD_MFR_ID);
  6403. if (j < 0)
  6404. goto vpd_done;
  6405. len = pci_vpd_info_field_size(&data[j]);
  6406. j += PCI_VPD_INFO_FLD_HDR_SIZE;
  6407. if (j + len > block_end || len != 4 ||
  6408. memcmp(&data[j], "1028", 4))
  6409. goto vpd_done;
  6410. j = pci_vpd_find_info_keyword(data, i, rosize,
  6411. PCI_VPD_RO_KEYWORD_VENDOR0);
  6412. if (j < 0)
  6413. goto vpd_done;
  6414. len = pci_vpd_info_field_size(&data[j]);
  6415. j += PCI_VPD_INFO_FLD_HDR_SIZE;
  6416. if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
  6417. goto vpd_done;
  6418. memcpy(bp->fw_version, &data[j], len);
  6419. bp->fw_version[len] = ' ';
  6420. vpd_done:
  6421. kfree(data);
  6422. }
  6423. static int __devinit
  6424. bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
  6425. {
  6426. struct bnx2 *bp;
  6427. unsigned long mem_len;
  6428. int rc, i, j;
  6429. u32 reg;
  6430. u64 dma_mask, persist_dma_mask;
  6431. SET_NETDEV_DEV(dev, &pdev->dev);
  6432. bp = netdev_priv(dev);
  6433. bp->flags = 0;
  6434. bp->phy_flags = 0;
  6435. bp->temp_stats_blk =
  6436. kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
  6437. if (bp->temp_stats_blk == NULL) {
  6438. rc = -ENOMEM;
  6439. goto err_out;
  6440. }
  6441. /* enable device (incl. PCI PM wakeup), and bus-mastering */
  6442. rc = pci_enable_device(pdev);
  6443. if (rc) {
  6444. dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
  6445. goto err_out;
  6446. }
  6447. if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
  6448. dev_err(&pdev->dev,
  6449. "Cannot find PCI device base address, aborting\n");
  6450. rc = -ENODEV;
  6451. goto err_out_disable;
  6452. }
  6453. rc = pci_request_regions(pdev, DRV_MODULE_NAME);
  6454. if (rc) {
  6455. dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
  6456. goto err_out_disable;
  6457. }
  6458. pci_set_master(pdev);
  6459. pci_save_state(pdev);
  6460. bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
  6461. if (bp->pm_cap == 0) {
  6462. dev_err(&pdev->dev,
  6463. "Cannot find power management capability, aborting\n");
  6464. rc = -EIO;
  6465. goto err_out_release;
  6466. }
  6467. bp->dev = dev;
  6468. bp->pdev = pdev;
  6469. spin_lock_init(&bp->phy_lock);
  6470. spin_lock_init(&bp->indirect_lock);
  6471. #ifdef BCM_CNIC
  6472. mutex_init(&bp->cnic_lock);
  6473. #endif
  6474. INIT_WORK(&bp->reset_task, bnx2_reset_task);
  6475. dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
  6476. mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
  6477. dev->mem_end = dev->mem_start + mem_len;
  6478. dev->irq = pdev->irq;
  6479. bp->regview = ioremap_nocache(dev->base_addr, mem_len);
  6480. if (!bp->regview) {
  6481. dev_err(&pdev->dev, "Cannot map register space, aborting\n");
  6482. rc = -ENOMEM;
  6483. goto err_out_release;
  6484. }
6485. /* Configure byte swap and enable write to the reg_window registers.
6486. * Rely on the CPU to do target byte swapping on big endian systems.
6487. * The chip's target access swapping will not swap all accesses.
6488. */
  6489. pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
  6490. BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
  6491. BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
  6492. bnx2_set_power_state(bp, PCI_D0);
  6493. bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
  6494. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  6495. if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
  6496. dev_err(&pdev->dev,
  6497. "Cannot find PCIE capability, aborting\n");
  6498. rc = -EIO;
  6499. goto err_out_unmap;
  6500. }
  6501. bp->flags |= BNX2_FLAG_PCIE;
  6502. if (CHIP_REV(bp) == CHIP_REV_Ax)
  6503. bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
  6504. } else {
  6505. bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
  6506. if (bp->pcix_cap == 0) {
  6507. dev_err(&pdev->dev,
  6508. "Cannot find PCIX capability, aborting\n");
  6509. rc = -EIO;
  6510. goto err_out_unmap;
  6511. }
  6512. bp->flags |= BNX2_FLAG_BROKEN_STATS;
  6513. }
  6514. if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
  6515. if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
  6516. bp->flags |= BNX2_FLAG_MSIX_CAP;
  6517. }
  6518. if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
  6519. if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
  6520. bp->flags |= BNX2_FLAG_MSI_CAP;
  6521. }
  6522. /* 5708 cannot support DMA addresses > 40-bit. */
  6523. if (CHIP_NUM(bp) == CHIP_NUM_5708)
  6524. persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
  6525. else
  6526. persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
  6527. /* Configure DMA attributes. */
  6528. if (pci_set_dma_mask(pdev, dma_mask) == 0) {
  6529. dev->features |= NETIF_F_HIGHDMA;
  6530. rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
  6531. if (rc) {
  6532. dev_err(&pdev->dev,
  6533. "pci_set_consistent_dma_mask failed, aborting\n");
  6534. goto err_out_unmap;
  6535. }
  6536. } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
  6537. dev_err(&pdev->dev, "System does not support DMA, aborting\n");
  6538. goto err_out_unmap;
  6539. }
  6540. if (!(bp->flags & BNX2_FLAG_PCIE))
  6541. bnx2_get_pci_speed(bp);
  6542. /* 5706A0 may falsely detect SERR and PERR. */
  6543. if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
  6544. reg = REG_RD(bp, PCI_COMMAND);
  6545. reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
  6546. REG_WR(bp, PCI_COMMAND, reg);
  6547. }
  6548. else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
  6549. !(bp->flags & BNX2_FLAG_PCIX)) {
  6550. dev_err(&pdev->dev,
  6551. "5706 A1 can only be used in a PCIX bus, aborting\n");
  6552. goto err_out_unmap;
  6553. }
  6554. bnx2_init_nvram(bp);
  6555. reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
  6556. if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
  6557. BNX2_SHM_HDR_SIGNATURE_SIG) {
  6558. u32 off = PCI_FUNC(pdev->devfn) << 2;
  6559. bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
  6560. } else
  6561. bp->shmem_base = HOST_VIEW_SHMEM_BASE;
  6562. /* Get the permanent MAC address. First we need to make sure the
  6563. * firmware is actually running.
  6564. */
  6565. reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
  6566. if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
  6567. BNX2_DEV_INFO_SIGNATURE_MAGIC) {
  6568. dev_err(&pdev->dev, "Firmware not running, aborting\n");
  6569. rc = -ENODEV;
  6570. goto err_out_unmap;
  6571. }
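/* Assemble bp->fw_version: any version read from VPD comes first,
 * followed by the bootcode revision ("bc x.y.z") decoded byte-by-byte
 * from BNX2_DEV_INFO_BC_REV and, if management firmware is running,
 * its version string read via BNX2_MFW_VER_PTR.
 */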
  6572. bnx2_read_vpd_fw_ver(bp);
  6573. j = strlen(bp->fw_version);
  6574. reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
  6575. for (i = 0; i < 3 && j < 24; i++) {
  6576. u8 num, k, skip0;
  6577. if (i == 0) {
  6578. bp->fw_version[j++] = 'b';
  6579. bp->fw_version[j++] = 'c';
  6580. bp->fw_version[j++] = ' ';
  6581. }
  6582. num = (u8) (reg >> (24 - (i * 8)));
  6583. for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
  6584. if (num >= k || !skip0 || k == 1) {
  6585. bp->fw_version[j++] = (num / k) + '0';
  6586. skip0 = 0;
  6587. }
  6588. }
  6589. if (i != 2)
  6590. bp->fw_version[j++] = '.';
  6591. }
  6592. reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
  6593. if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
  6594. bp->wol = 1;
  6595. if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
  6596. bp->flags |= BNX2_FLAG_ASF_ENABLE;
  6597. for (i = 0; i < 30; i++) {
  6598. reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
  6599. if (reg & BNX2_CONDITION_MFW_RUN_MASK)
  6600. break;
  6601. msleep(10);
  6602. }
  6603. }
  6604. reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
  6605. reg &= BNX2_CONDITION_MFW_RUN_MASK;
  6606. if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
  6607. reg != BNX2_CONDITION_MFW_RUN_NONE) {
  6608. u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
  6609. if (j < 32)
  6610. bp->fw_version[j++] = ' ';
  6611. for (i = 0; i < 3 && j < 28; i++) {
  6612. reg = bnx2_reg_rd_ind(bp, addr + i * 4);
  6613. reg = swab32(reg);
  6614. memcpy(&bp->fw_version[j], &reg, 4);
  6615. j += 4;
  6616. }
  6617. }
  6618. reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
  6619. bp->mac_addr[0] = (u8) (reg >> 8);
  6620. bp->mac_addr[1] = (u8) reg;
  6621. reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
  6622. bp->mac_addr[2] = (u8) (reg >> 24);
  6623. bp->mac_addr[3] = (u8) (reg >> 16);
  6624. bp->mac_addr[4] = (u8) (reg >> 8);
  6625. bp->mac_addr[5] = (u8) reg;
  6626. bp->tx_ring_size = MAX_TX_DESC_CNT;
  6627. bnx2_set_rx_ring_size(bp, 255);
  6628. bp->rx_csum = 1;
  6629. bp->tx_quick_cons_trip_int = 2;
  6630. bp->tx_quick_cons_trip = 20;
  6631. bp->tx_ticks_int = 18;
  6632. bp->tx_ticks = 80;
  6633. bp->rx_quick_cons_trip_int = 2;
  6634. bp->rx_quick_cons_trip = 12;
  6635. bp->rx_ticks_int = 18;
  6636. bp->rx_ticks = 18;
  6637. bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
  6638. bp->current_interval = BNX2_TIMER_INTERVAL;
  6639. bp->phy_addr = 1;
  6640. /* Disable WOL support if we are running on a SERDES chip. */
  6641. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  6642. bnx2_get_5709_media(bp);
  6643. else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
  6644. bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
  6645. bp->phy_port = PORT_TP;
  6646. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  6647. bp->phy_port = PORT_FIBRE;
  6648. reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
  6649. if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
  6650. bp->flags |= BNX2_FLAG_NO_WOL;
  6651. bp->wol = 0;
  6652. }
  6653. if (CHIP_NUM(bp) == CHIP_NUM_5706) {
  6654. /* Don't do parallel detect on this board because of
  6655. * some board problems. The link will not go down
  6656. * if we do parallel detect.
  6657. */
  6658. if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
  6659. pdev->subsystem_device == 0x310c)
  6660. bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
  6661. } else {
  6662. bp->phy_addr = 2;
  6663. if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
  6664. bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
  6665. }
  6666. } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
  6667. CHIP_NUM(bp) == CHIP_NUM_5708)
  6668. bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
  6669. else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
  6670. (CHIP_REV(bp) == CHIP_REV_Ax ||
  6671. CHIP_REV(bp) == CHIP_REV_Bx))
  6672. bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
  6673. bnx2_init_fw_cap(bp);
  6674. if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
  6675. (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
  6676. (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
  6677. !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
  6678. bp->flags |= BNX2_FLAG_NO_WOL;
  6679. bp->wol = 0;
  6680. }
  6681. if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
  6682. bp->tx_quick_cons_trip_int =
  6683. bp->tx_quick_cons_trip;
  6684. bp->tx_ticks_int = bp->tx_ticks;
  6685. bp->rx_quick_cons_trip_int =
  6686. bp->rx_quick_cons_trip;
  6687. bp->rx_ticks_int = bp->rx_ticks;
  6688. bp->comp_prod_trip_int = bp->comp_prod_trip;
  6689. bp->com_ticks_int = bp->com_ticks;
  6690. bp->cmd_ticks_int = bp->cmd_ticks;
  6691. }
  6692. /* Disable MSI on 5706 if AMD 8132 bridge is found.
  6693. *
  6694. * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
  6695. * with byte enables disabled on the unused 32-bit word. This is legal
  6696. * but causes problems on the AMD 8132 which will eventually stop
  6697. * responding after a while.
  6698. *
  6699. * AMD believes this incompatibility is unique to the 5706, and
  6700. * prefers to locally disable MSI rather than globally disabling it.
  6701. */
  6702. if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
  6703. struct pci_dev *amd_8132 = NULL;
  6704. while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
  6705. PCI_DEVICE_ID_AMD_8132_BRIDGE,
  6706. amd_8132))) {
  6707. if (amd_8132->revision >= 0x10 &&
  6708. amd_8132->revision <= 0x13) {
  6709. disable_msi = 1;
  6710. pci_dev_put(amd_8132);
  6711. break;
  6712. }
  6713. }
  6714. }
  6715. bnx2_set_default_link(bp);
  6716. bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
  6717. init_timer(&bp->timer);
  6718. bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
  6719. bp->timer.data = (unsigned long) bp;
  6720. bp->timer.function = bnx2_timer;
  6721. return 0;
  6722. err_out_unmap:
  6723. if (bp->regview) {
  6724. iounmap(bp->regview);
  6725. bp->regview = NULL;
  6726. }
  6727. err_out_release:
  6728. pci_release_regions(pdev);
  6729. err_out_disable:
  6730. pci_disable_device(pdev);
  6731. pci_set_drvdata(pdev, NULL);
  6732. err_out:
  6733. return rc;
  6734. }
  6735. static char * __devinit
  6736. bnx2_bus_string(struct bnx2 *bp, char *str)
  6737. {
  6738. char *s = str;
  6739. if (bp->flags & BNX2_FLAG_PCIE) {
  6740. s += sprintf(s, "PCI Express");
  6741. } else {
  6742. s += sprintf(s, "PCI");
  6743. if (bp->flags & BNX2_FLAG_PCIX)
  6744. s += sprintf(s, "-X");
  6745. if (bp->flags & BNX2_FLAG_PCI_32BIT)
  6746. s += sprintf(s, " 32-bit");
  6747. else
  6748. s += sprintf(s, " 64-bit");
  6749. s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
  6750. }
  6751. return str;
  6752. }
  6753. static void
  6754. bnx2_del_napi(struct bnx2 *bp)
  6755. {
  6756. int i;
  6757. for (i = 0; i < bp->irq_nvecs; i++)
  6758. netif_napi_del(&bp->bnx2_napi[i].napi);
  6759. }
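/* One NAPI context is registered per interrupt vector: vector 0 uses
 * bnx2_poll(), the remaining MSI-X vectors use bnx2_poll_msix(), all
 * with a NAPI weight of 64.
 */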
  6760. static void
  6761. bnx2_init_napi(struct bnx2 *bp)
  6762. {
  6763. int i;
  6764. for (i = 0; i < bp->irq_nvecs; i++) {
  6765. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  6766. int (*poll)(struct napi_struct *, int);
  6767. if (i == 0)
  6768. poll = bnx2_poll;
  6769. else
  6770. poll = bnx2_poll_msix;
  6771. netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
  6772. bnapi->bp = bp;
  6773. }
  6774. }
  6775. static const struct net_device_ops bnx2_netdev_ops = {
  6776. .ndo_open = bnx2_open,
  6777. .ndo_start_xmit = bnx2_start_xmit,
  6778. .ndo_stop = bnx2_close,
  6779. .ndo_get_stats64 = bnx2_get_stats64,
  6780. .ndo_set_rx_mode = bnx2_set_rx_mode,
  6781. .ndo_do_ioctl = bnx2_ioctl,
  6782. .ndo_validate_addr = eth_validate_addr,
  6783. .ndo_set_mac_address = bnx2_change_mac_addr,
  6784. .ndo_change_mtu = bnx2_change_mtu,
  6785. .ndo_tx_timeout = bnx2_tx_timeout,
  6786. #ifdef BCM_VLAN
  6787. .ndo_vlan_rx_register = bnx2_vlan_rx_register,
  6788. #endif
  6789. #ifdef CONFIG_NET_POLL_CONTROLLER
  6790. .ndo_poll_controller = poll_bnx2,
  6791. #endif
  6792. };
6793. static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
  6794. {
  6795. #ifdef BCM_VLAN
  6796. dev->vlan_features |= flags;
  6797. #endif
  6798. }
  6799. static int __devinit
  6800. bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  6801. {
  6802. static int version_printed = 0;
  6803. struct net_device *dev = NULL;
  6804. struct bnx2 *bp;
  6805. int rc;
  6806. char str[40];
  6807. if (version_printed++ == 0)
  6808. pr_info("%s", version);
6809. /* dev zeroed in alloc_etherdev_mq */
  6810. dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
  6811. if (!dev)
  6812. return -ENOMEM;
  6813. rc = bnx2_init_board(pdev, dev);
  6814. if (rc < 0) {
  6815. free_netdev(dev);
  6816. return rc;
  6817. }
  6818. dev->netdev_ops = &bnx2_netdev_ops;
  6819. dev->watchdog_timeo = TX_TIMEOUT;
  6820. dev->ethtool_ops = &bnx2_ethtool_ops;
  6821. bp = netdev_priv(dev);
  6822. pci_set_drvdata(pdev, dev);
  6823. rc = bnx2_request_firmware(bp);
  6824. if (rc)
  6825. goto error;
  6826. memcpy(dev->dev_addr, bp->mac_addr, 6);
  6827. memcpy(dev->perm_addr, bp->mac_addr, 6);
  6828. dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
  6829. NETIF_F_RXHASH;
  6830. vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
  6831. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  6832. dev->features |= NETIF_F_IPV6_CSUM;
  6833. vlan_features_add(dev, NETIF_F_IPV6_CSUM);
  6834. }
  6835. #ifdef BCM_VLAN
  6836. dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  6837. #endif
  6838. dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
  6839. vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
  6840. if (CHIP_NUM(bp) == CHIP_NUM_5709) {
  6841. dev->features |= NETIF_F_TSO6;
  6842. vlan_features_add(dev, NETIF_F_TSO6);
  6843. }
  6844. if ((rc = register_netdev(dev))) {
  6845. dev_err(&pdev->dev, "Cannot register net device\n");
  6846. goto error;
  6847. }
  6848. netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
  6849. board_info[ent->driver_data].name,
  6850. ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
  6851. ((CHIP_ID(bp) & 0x0ff0) >> 4),
  6852. bnx2_bus_string(bp, str),
  6853. dev->base_addr,
  6854. bp->pdev->irq, dev->dev_addr);
  6855. return 0;
  6856. error:
  6857. if (bp->mips_firmware)
  6858. release_firmware(bp->mips_firmware);
  6859. if (bp->rv2p_firmware)
  6860. release_firmware(bp->rv2p_firmware);
  6861. if (bp->regview)
  6862. iounmap(bp->regview);
  6863. pci_release_regions(pdev);
  6864. pci_disable_device(pdev);
  6865. pci_set_drvdata(pdev, NULL);
  6866. free_netdev(dev);
  6867. return rc;
  6868. }
  6869. static void __devexit
  6870. bnx2_remove_one(struct pci_dev *pdev)
  6871. {
  6872. struct net_device *dev = pci_get_drvdata(pdev);
  6873. struct bnx2 *bp = netdev_priv(dev);
  6874. flush_scheduled_work();
  6875. unregister_netdev(dev);
  6876. if (bp->mips_firmware)
  6877. release_firmware(bp->mips_firmware);
  6878. if (bp->rv2p_firmware)
  6879. release_firmware(bp->rv2p_firmware);
  6880. if (bp->regview)
  6881. iounmap(bp->regview);
  6882. kfree(bp->temp_stats_blk);
  6883. free_netdev(dev);
  6884. pci_release_regions(pdev);
  6885. pci_disable_device(pdev);
  6886. pci_set_drvdata(pdev, NULL);
  6887. }
  6888. static int
  6889. bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
  6890. {
  6891. struct net_device *dev = pci_get_drvdata(pdev);
  6892. struct bnx2 *bp = netdev_priv(dev);
  6893. /* PCI register 4 needs to be saved whether netif_running() or not.
  6894. * MSI address and data need to be saved if using MSI and
  6895. * netif_running().
  6896. */
  6897. pci_save_state(pdev);
  6898. if (!netif_running(dev))
  6899. return 0;
  6900. flush_scheduled_work();
  6901. bnx2_netif_stop(bp, true);
  6902. netif_device_detach(dev);
  6903. del_timer_sync(&bp->timer);
  6904. bnx2_shutdown_chip(bp);
  6905. bnx2_free_skbs(bp);
  6906. bnx2_set_power_state(bp, pci_choose_state(pdev, state));
  6907. return 0;
  6908. }
  6909. static int
  6910. bnx2_resume(struct pci_dev *pdev)
  6911. {
  6912. struct net_device *dev = pci_get_drvdata(pdev);
  6913. struct bnx2 *bp = netdev_priv(dev);
  6914. pci_restore_state(pdev);
  6915. if (!netif_running(dev))
  6916. return 0;
  6917. bnx2_set_power_state(bp, PCI_D0);
  6918. netif_device_attach(dev);
  6919. bnx2_init_nic(bp, 1);
  6920. bnx2_netif_start(bp, true);
  6921. return 0;
  6922. }
  6923. /**
  6924. * bnx2_io_error_detected - called when PCI error is detected
  6925. * @pdev: Pointer to PCI device
  6926. * @state: The current pci connection state
  6927. *
  6928. * This function is called after a PCI bus error affecting
  6929. * this device has been detected.
  6930. */
  6931. static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
  6932. pci_channel_state_t state)
  6933. {
  6934. struct net_device *dev = pci_get_drvdata(pdev);
  6935. struct bnx2 *bp = netdev_priv(dev);
  6936. rtnl_lock();
  6937. netif_device_detach(dev);
  6938. if (state == pci_channel_io_perm_failure) {
  6939. rtnl_unlock();
  6940. return PCI_ERS_RESULT_DISCONNECT;
  6941. }
  6942. if (netif_running(dev)) {
  6943. bnx2_netif_stop(bp, true);
  6944. del_timer_sync(&bp->timer);
  6945. bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
  6946. }
  6947. pci_disable_device(pdev);
  6948. rtnl_unlock();
6949. /* Request a slot reset. */
  6950. return PCI_ERS_RESULT_NEED_RESET;
  6951. }
  6952. /**
  6953. * bnx2_io_slot_reset - called after the pci bus has been reset.
  6954. * @pdev: Pointer to PCI device
  6955. *
  6956. * Restart the card from scratch, as if from a cold-boot.
  6957. */
  6958. static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
  6959. {
  6960. struct net_device *dev = pci_get_drvdata(pdev);
  6961. struct bnx2 *bp = netdev_priv(dev);
  6962. rtnl_lock();
  6963. if (pci_enable_device(pdev)) {
  6964. dev_err(&pdev->dev,
  6965. "Cannot re-enable PCI device after reset\n");
  6966. rtnl_unlock();
  6967. return PCI_ERS_RESULT_DISCONNECT;
  6968. }
  6969. pci_set_master(pdev);
  6970. pci_restore_state(pdev);
  6971. pci_save_state(pdev);
  6972. if (netif_running(dev)) {
  6973. bnx2_set_power_state(bp, PCI_D0);
  6974. bnx2_init_nic(bp, 1);
  6975. }
  6976. rtnl_unlock();
  6977. return PCI_ERS_RESULT_RECOVERED;
  6978. }
  6979. /**
  6980. * bnx2_io_resume - called when traffic can start flowing again.
  6981. * @pdev: Pointer to PCI device
  6982. *
  6983. * This callback is called when the error recovery driver tells us that
6984. * it's OK to resume normal operation.
  6985. */
  6986. static void bnx2_io_resume(struct pci_dev *pdev)
  6987. {
  6988. struct net_device *dev = pci_get_drvdata(pdev);
  6989. struct bnx2 *bp = netdev_priv(dev);
  6990. rtnl_lock();
  6991. if (netif_running(dev))
  6992. bnx2_netif_start(bp, true);
  6993. netif_device_attach(dev);
  6994. rtnl_unlock();
  6995. }
  6996. static struct pci_error_handlers bnx2_err_handler = {
  6997. .error_detected = bnx2_io_error_detected,
  6998. .slot_reset = bnx2_io_slot_reset,
  6999. .resume = bnx2_io_resume,
  7000. };
  7001. static struct pci_driver bnx2_pci_driver = {
  7002. .name = DRV_MODULE_NAME,
  7003. .id_table = bnx2_pci_tbl,
  7004. .probe = bnx2_init_one,
  7005. .remove = __devexit_p(bnx2_remove_one),
  7006. .suspend = bnx2_suspend,
  7007. .resume = bnx2_resume,
  7008. .err_handler = &bnx2_err_handler,
  7009. };
  7010. static int __init bnx2_init(void)
  7011. {
  7012. return pci_register_driver(&bnx2_pci_driver);
  7013. }
  7014. static void __exit bnx2_cleanup(void)
  7015. {
  7016. pci_unregister_driver(&bnx2_pci_driver);
  7017. }
  7018. module_init(bnx2_init);
  7019. module_exit(bnx2_cleanup);