/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.3"
#define DRV_MODULE_RELDATE	"June 27, 2012"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
        char *name;
} board_info[] = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-buffered)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash) */
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-buffered)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Atmel Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
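/*
 * Each flash_table entry pairs a strapping value (first field) with the
 * controller command words and the part's page geometry; the trailing
 * string is only a human-readable name.  The NVRAM init code later in
 * the driver presumably selects the entry whose strapping bits match
 * what the chip sampled at reset; 5709-class parts use the fixed
 * flash_5709 spec below instead.
 */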
static const struct flash_spec flash_5709 = {
        .flags		= BNX2_NV_BUFFERED,
        .page_bits	= BCM5709_FLASH_PAGE_BITS,
        .page_size	= BCM5709_FLASH_PAGE_SIZE,
        .addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
        .name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
        u32 diff;

        /* Tell compiler to fetch tx_prod and tx_cons from memory. */
        barrier();

        /* The ring uses 256 indices for 255 entries, one of them
         * needs to be skipped.
         */
        diff = txr->tx_prod - txr->tx_cons;
        if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
                diff &= 0xffff;
                if (diff == BNX2_TX_DESC_CNT)
                        diff = BNX2_MAX_TX_DESC_CNT;
        }
        return bp->tx_ring_size - diff;
}
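/*
 * A worked example of the wrap handling above (illustrative, assuming
 * 4 KiB pages so BNX2_TX_DESC_CNT is 256): with 16-bit indices,
 * tx_prod = 0x0002 and tx_cons = 0xff03 give a u32 difference of
 * 0xffff00ff, which trips the >= BNX2_TX_DESC_CNT test; masking to 16
 * bits recovers the true distance, 0x00ff (255).  A masked distance of
 * exactly 256 means prod has lapped cons across the skipped index
 * (presumably the chain descriptor at the page boundary), so the real
 * occupancy is BNX2_MAX_TX_DESC_CNT (255).
 */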
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
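/*
 * Both helpers above implement a classic address/data window: the
 * target offset is written to BNX2_PCICFG_REG_WINDOW_ADDRESS and the
 * data then moves through BNX2_PCICFG_REG_WINDOW.  indirect_lock keeps
 * the two-step sequence atomic against other indirect accessors (the
 * context write helper below takes the same lock).
 */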
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
        return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
                BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                for (i = 0; i < 5; i++) {
                        val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
                BNX2_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
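/*
 * On 5709-class chips a context write is posted: the data and a
 * WRITE_REQ-flagged address are handed to the context engine, and the
 * loop above polls up to five times at 5 us intervals (~25 us total)
 * for the request bit to clear.  Older chips take the simpler
 * address/data path.  Note the timeout is silent; a stuck WRITE_REQ is
 * not reported to the caller.
 */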
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct drv_ctl_io *io = &info->data.io;

        switch (info->cmd) {
        case DRV_CTL_IO_WR_CMD:
                bnx2_reg_wr_ind(bp, io->offset, io->data);
                break;
        case DRV_CTL_IO_RD_CMD:
                io->data = bnx2_reg_rd_ind(bp, io->offset);
                break;
        case DRV_CTL_CTX_WR_CMD:
                bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        int sb_id;

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_present = 0;
                sb_id = bp->irq_nvecs;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_tag = bnapi->last_status_idx;
                bnapi->cnic_present = 1;
                sb_id = 0;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }

        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
        cp->irq_arr[0].status_blk = (void *)
                ((unsigned long) bnapi->status_blk.msi +
                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
        cp->irq_arr[0].status_blk_num = sb_id;
        cp->num_irq = 1;
}
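/*
 * With MSI-X the CNIC (offload) driver gets the vector and status block
 * right after the ones this driver uses (sb_id == bp->irq_nvecs);
 * otherwise it shares vector 0, and cnic_present plus cnic_tag
 * presumably let the shared NAPI poll path hand events over to cnic.
 * The status block pointer is computed as sb_id aligned blocks past the
 * base allocation made in bnx2_alloc_mem().
 */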
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                              void *data)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (cp->drv_state & CNIC_DRV_STATE_REGD)
                return -EBUSY;

        if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
                return -ENODEV;

        bp->cnic_data = data;
        rcu_assign_pointer(bp->cnic_ops, ops);

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2_setup_cnic_irq_info(bp);

        return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (!cp->max_iscsi_conn)
                return NULL;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = bp->chip_id;
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->drv_ctl = bnx2_drv_ctl;
        cp->drv_register_cnic = bnx2_register_cnic;
        cp->drv_unregister_cnic = bnx2_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = rcu_dereference_protected(bp->cnic_ops,
                                          lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                info.cmd = CNIC_CTL_STOP_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = rcu_dereference_protected(bp->cnic_ops,
                                          lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

                        bnapi->cnic_tag = bnapi->last_status_idx;
                }
                info.cmd = CNIC_CTL_START_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        } else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
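/*
 * The MDIO frame is built by hand: the PHY address goes in bits 25:21,
 * the register number in bits 20:16, and START_BUSY kicks off the
 * transaction.  Completion is polled up to 50 times at 10 us intervals
 * (~500 us).  Hardware auto-polling of the PHY is switched off around
 * the access so the manual transaction does not race it.
 *
 * Illustrative use with the standard <linux/mii.h> definitions (not a
 * call site from this file):
 *
 *	u32 bmsr;
 *
 *	if (bnx2_read_phy(bp, MII_BMSR, &bmsr) == 0)
 *		link_up = !!(bmsr & BMSR_LSTATUS);
 */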
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];
                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        }
        BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                        bnapi->last_status_idx);

                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                        bnapi->last_status_idx);
        }
        BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
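/*
 * Per vector, the first INT_ACK write above acks the last seen status
 * index with the mask bit still set, and the second clears the mask, so
 * the index update and the unmask cannot be observed out of order.  The
 * final COAL_NOW kick asks the host coalescing block to fire
 * immediately, presumably so events that arrived while interrupts were
 * masked are not left pending until the next packet.
 */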
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
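/*
 * intr_sem is bumped before the hardware mask, presumably so the
 * interrupt path elsewhere in the driver can see that interrupts are
 * logically off, and synchronize_irq() then waits for any handler
 * already running on each vector to finish.  On return no handler is
 * executing and none will be delivered.
 */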
static void
bnx2_napi_disable(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->irq_nvecs; i++)
                napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->irq_nvecs; i++)
                napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
        if (stop_cnic)
                bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
        }
        bnx2_disable_int_sync(bp);
        netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        spin_lock_bh(&bp->phy_lock);
                        if (bp->link_up)
                                netif_carrier_on(bp->dev);
                        spin_unlock_bh(&bp->phy_lock);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                        if (start_cnic)
                                bnx2_cnic_start(bp);
                }
        }
}
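/*
 * bnx2_netif_stop()/bnx2_netif_start() nest: each stop increments
 * bp->intr_sem (via bnx2_disable_int_sync), and only the start that
 * brings intr_sem back to zero, via atomic_dec_and_test(), re-enables
 * NAPI, interrupts and, optionally, cnic.
 */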
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                if (txr->tx_desc_ring) {
                        dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
                                          txr->tx_desc_ring,
                                          txr->tx_desc_mapping);
                        txr->tx_desc_ring = NULL;
                }
                kfree(txr->tx_buf_ring);
                txr->tx_buf_ring = NULL;
        }
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                for (j = 0; j < bp->rx_max_ring; j++) {
                        if (rxr->rx_desc_ring[j])
                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                                  rxr->rx_desc_ring[j],
                                                  rxr->rx_desc_mapping[j]);
                        rxr->rx_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_buf_ring);
                rxr->rx_buf_ring = NULL;

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        if (rxr->rx_pg_desc_ring[j])
                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                                  rxr->rx_pg_desc_ring[j],
                                                  rxr->rx_pg_desc_mapping[j]);
                        rxr->rx_pg_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_pg_ring);
                rxr->rx_pg_ring = NULL;
        }
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
                if (txr->tx_buf_ring == NULL)
                        return -ENOMEM;

                txr->tx_desc_ring =
                        dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
                                           &txr->tx_desc_mapping, GFP_KERNEL);
                if (txr->tx_desc_ring == NULL)
                        return -ENOMEM;
        }
        return 0;
}
  659. static int
  660. bnx2_alloc_rx_mem(struct bnx2 *bp)
  661. {
  662. int i;
  663. for (i = 0; i < bp->num_rx_rings; i++) {
  664. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  665. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  666. int j;
  667. rxr->rx_buf_ring =
  668. vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
  669. if (rxr->rx_buf_ring == NULL)
  670. return -ENOMEM;
  671. for (j = 0; j < bp->rx_max_ring; j++) {
  672. rxr->rx_desc_ring[j] =
  673. dma_alloc_coherent(&bp->pdev->dev,
  674. RXBD_RING_SIZE,
  675. &rxr->rx_desc_mapping[j],
  676. GFP_KERNEL);
  677. if (rxr->rx_desc_ring[j] == NULL)
  678. return -ENOMEM;
  679. }
  680. if (bp->rx_pg_ring_size) {
  681. rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
  682. bp->rx_max_pg_ring);
  683. if (rxr->rx_pg_ring == NULL)
  684. return -ENOMEM;
  685. }
  686. for (j = 0; j < bp->rx_max_pg_ring; j++) {
  687. rxr->rx_pg_desc_ring[j] =
  688. dma_alloc_coherent(&bp->pdev->dev,
  689. RXBD_RING_SIZE,
  690. &rxr->rx_pg_desc_mapping[j],
  691. GFP_KERNEL);
  692. if (rxr->rx_pg_desc_ring[j] == NULL)
  693. return -ENOMEM;
  694. }
  695. }
  696. return 0;
  697. }
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
                                          bp->ctx_blk[i],
                                          bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bnapi->status_blk.msi) {
                dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
                                  bnapi->status_blk.msi,
                                  bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
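
/* Allocate the shared host memory.  The status block(s) and the
 * statistics block live in one coherent DMA allocation: either a
 * single struct status_block, or BNX2_MAX_MSIX_HW_VEC fixed-size MSI-X
 * status blocks, with the statistics block placed at status_blk_size.
 * The 5709 additionally needs host-resident context memory (0x2000
 * bytes here, split into BNX2_PAGE_SIZE chunks).
 */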
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
                                        &bp->status_blk_mapping, GFP_KERNEL);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < bp->irq_nvecs; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        bp->stats_blk = status_blk + status_blk_size;
        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
                                                BNX2_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i],
                                                GFP_KERNEL);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
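
/* Mirror the resolved link state into shared memory so the bootcode /
 * management firmware sees the same speed, duplex and autoneg status
 * the driver reports.  Skipped when the PHY is owned remotely, since
 * the firmware is then the source of link state.  Note the BMSR is
 * read twice on purpose: the latched bits only reflect the current
 * state on the second read.
 */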
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |=
                                        BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |=
                                        BNX2_LINK_STATUS_AN_COMPLETE;
                }
        } else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
        return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
                ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
                 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
                            bnx2_xceiver_str(bp),
                            bp->line_speed,
                            bp->duplex == DUPLEX_FULL ? "full" : "half");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                pr_cont(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        pr_cont("& transmit ");
                        } else {
                                pr_cont(", transmit ");
                        }
                        pr_cont("flow control ON");
                }
                pr_cont("\n");
        } else {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC %s Link is Down\n",
                           bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
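
/* Resolve TX/RX pause from the autoneg results.  If either speed or
 * flow-control autoneg is off, the forced request is used verbatim
 * (and only at full duplex, where pause is meaningful).  The 5708
 * SerDes reports the resolution directly in a status register;
 * everything else is resolved from the local/partner advertisements,
 * with the 1000X pause bits first mapped onto their copper
 * ADVERTISE_PAUSE_* equivalents so one resolution table covers both
 * media types.
 */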
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
            (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL)
                        bp->flow_ctrl = bp->req_flow_ctrl;
                return;
        }

        if (bp->duplex != DUPLEX_FULL)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP)
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM)
                                bp->flow_ctrl = FLOW_CTRL_RX;
                } else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP)
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                    (remote_adv & ADVERTISE_PAUSE_ASYM))
                        bp->flow_ctrl = FLOW_CTRL_TX;
        }
}
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }

        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
        case MII_BNX2_GP_TOP_AN_SPEED_10:
                bp->line_speed = SPEED_10;
                break;
        case MII_BNX2_GP_TOP_AN_SPEED_100:
                bp->line_speed = SPEED_100;
                break;
        case MII_BNX2_GP_TOP_AN_SPEED_1G:
        case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                bp->line_speed = SPEED_1000;
                break;
        case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                bp->line_speed = SPEED_2500;
                break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
        u32 val;

        bp->link_up = 1;
        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
        switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
        case BCM5708S_1000X_STAT1_SPEED_10:
                bp->line_speed = SPEED_10;
                break;
        case BCM5708S_1000X_STAT1_SPEED_100:
                bp->line_speed = SPEED_100;
                break;
        case BCM5708S_1000X_STAT1_SPEED_1G:
                bp->line_speed = SPEED_1000;
                break;
        case BCM5708S_1000X_STAT1_SPEED_2G5:
                bp->line_speed = SPEED_2500;
                break;
        }
        if (val & BCM5708S_1000X_STAT1_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;

        return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
        u32 bmcr, local_adv, remote_adv, common;

        bp->link_up = 1;
        bp->line_speed = SPEED_1000;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_FULLDPLX)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;

        if (!(bmcr & BMCR_ANENABLE))
                return 0;

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        common = local_adv & remote_adv;
        if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
                if (common & ADVERTISE_1000XFULL)
                        bp->duplex = DUPLEX_FULL;
                else
                        bp->duplex = DUPLEX_HALF;
        }

        return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                } else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                } else {
                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        } else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        } else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        } else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        } else {
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        } else {
                if (bmcr & BMCR_SPEED100)
                        bp->line_speed = SPEED_100;
                else
                        bp->line_speed = SPEED_10;

                if (bmcr & BMCR_FULLDPLX)
                        bp->duplex = DUPLEX_FULL;
                else
                        bp->duplex = DUPLEX_HALF;
        }

        return 0;
}
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, rx_cid_addr = GET_CID_ADDR(cid);

        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
        int i;
        u32 cid;

        for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
                if (i == 1)
                        cid = RX_RSS_CID;
                bnx2_init_rx_context(bp, cid);
        }
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
            (bp->duplex == DUPLEX_HALF)) {
                BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = BNX2_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                 BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                case SPEED_10:
                        if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                break;
                        }
                        /* fall through */
                case SPEED_100:
                        val |= BNX2_EMAC_MODE_PORT_MII;
                        break;
                case SPEED_2500:
                        val |= BNX2_EMAC_MODE_25G_MODE;
                        /* fall through */
                case SPEED_1000:
                        val |= BNX2_EMAC_MODE_PORT_GMII;
                        break;
                }
        } else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        BNX2_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5709))
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5709))
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
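
/* Advertise 2.5G on PHYs that support it.  The UP1 register sits in
 * the OVER1G block on the 5709, hence the block-address dance around
 * the read-modify-write.  Returns 1 if 2.5G was already advertised and
 * 0 if this call had to turn it on; callers use that to decide whether
 * the link must be bounced.  bnx2_test_and_disable_2g5() is the mirror
 * image with the inverted return convention.
 */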
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 1;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return 0;

        if (bp->autoneg & AUTONEG_SPEED)
                bp->advertising |= ADVERTISED_2500baseX_Full;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (!(up1 & BCM5708S_UP1_2G5)) {
                up1 |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 0;
        }

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 0;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return 0;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (up1 & BCM5708S_UP1_2G5) {
                up1 &= ~BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 1;
        }

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
        u32 uninitialized_var(bmcr);
        int err;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
                        val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
                        val |= MII_BNX2_SD_MISC1_FORCE |
                               MII_BNX2_SD_MISC1_FORCE_2_5G;
                        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
                }

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (!err)
                        bmcr |= BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (err)
                return;

        if (bp->autoneg & AUTONEG_SPEED) {
                bmcr &= ~BMCR_ANENABLE;
                if (bp->req_duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
        }
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
        u32 uninitialized_var(bmcr);
        int err;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
                        val &= ~MII_BNX2_SD_MISC1_FORCE;
                        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
                }

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (!err)
                        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (err)
                return;

        if (bp->autoneg & AUTONEG_SPEED)
                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
        u32 val;

        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
        if (start)
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
        else
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
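
/* Re-evaluate link state after a link attention.  Loopback and
 * remote-PHY modes short-circuit early.  On 5706 SerDes the BMSR link
 * bit is not trusted, so it is overridden from the EMAC status and the
 * autoneg-debug shadow register before the per-chip linkup handler
 * fills in speed/duplex and flow control is resolved.
 */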
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val, an_dbg;

                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = BNX2_RD(bp, BNX2_EMAC_STATUS);

                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                } else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        } else {
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
        int i;
        u32 reg;

        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
        for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
                udelay(10);

                bnx2_read_phy(bp, bp->mii_bmcr, &reg);
                if (!(reg & BMCR_RESET)) {
                        udelay(20);
                        break;
                }
        }
        if (i == PHY_RESET_MAX_WAIT) {
                return -EBUSY;
        }
        return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
        u32 adv = 0;

        if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
            (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
                        adv = ADVERTISE_1000XPAUSE;
                else
                        adv = ADVERTISE_PAUSE_CAP;
        } else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
                        adv = ADVERTISE_1000XPSE_ASYM;
                else
                        adv = ADVERTISE_PAUSE_ASYM;
        } else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
                        adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
                else
                        adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        }
        return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 speed_arg = 0, pause_adv;

        pause_adv = bnx2_phy_get_pause_adv(bp);

        if (bp->autoneg & AUTONEG_SPEED) {
                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                if (bp->advertising & ADVERTISED_2500baseX_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        } else {
                if (bp->req_line_speed == SPEED_2500)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
                else if (bp->req_line_speed == SPEED_1000)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                else if (bp->req_line_speed == SPEED_100) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                } else if (bp->req_line_speed == SPEED_10) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                }
        }

        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

        if (port == PORT_TP)
                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

        spin_unlock_bh(&bp->phy_lock);
        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);

        return 0;
}
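
/* Configure the SerDes PHY.  The forced-speed path programs BMCR and
 * the 1000X advertisement directly and, when the link must visibly
 * drop (speed change or a 2.5G toggle), briefly restarts autoneg with
 * an empty advertisement so the partner sees the transition.  The
 * autoneg path rewrites the advertisement and restarts AN, arming the
 * SerDes timer so the driver can fall back when a non-negotiating
 * partner is detected.
 */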
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return bnx2_setup_remote_phy(bp, port);

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                u32 new_bmcr;
                int force_link_down = 0;

                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                } else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                               BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                               BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
        (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) : \
        (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
        ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
        u32 link;

        if (bp->phy_port == PORT_TP)
                link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
        else
                link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

        if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
                bp->req_line_speed = 0;
                bp->autoneg |= AUTONEG_SPEED;
                bp->advertising = ADVERTISED_Autoneg;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                        bp->advertising |= ADVERTISED_10baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
                        bp->advertising |= ADVERTISED_10baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                        bp->advertising |= ADVERTISED_100baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
                        bp->advertising |= ADVERTISED_100baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->advertising |= ADVERTISED_1000baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->advertising |= ADVERTISED_2500baseX_Full;
        } else {
                bp->autoneg = 0;
                bp->advertising = 0;
                bp->req_duplex = DUPLEX_FULL;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
                        bp->req_line_speed = SPEED_10;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
                        bp->req_line_speed = SPEED_100;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->req_line_speed = SPEED_1000;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->req_line_speed = SPEED_2500;
        }
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                bnx2_set_default_remote_link(bp);
                return;
        }

        bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
        bp->req_line_speed = 0;
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 reg;

                bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

                reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
                reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
                if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
                        bp->autoneg = 0;
                        bp->req_line_speed = bp->line_speed = SPEED_1000;
                        bp->req_duplex = DUPLEX_FULL;
                }
        } else
                bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
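
/* Driver-pulse heartbeat to the bootcode.  The sequence counter is
 * written through the PCICFG register window under indirect_lock;
 * presumably, if the pulses stop arriving, the firmware concludes the
 * driver is gone and can take back control of the device.
 */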
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
        u32 msg;
        u32 addr;

        spin_lock(&bp->indirect_lock);
        msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
        addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
        spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                switch (speed) {
                case BNX2_LINK_STATUS_10HALF:
                        bp->duplex = DUPLEX_HALF;
                        /* fall through */
                case BNX2_LINK_STATUS_10FULL:
                        bp->line_speed = SPEED_10;
                        break;
                case BNX2_LINK_STATUS_100HALF:
                        bp->duplex = DUPLEX_HALF;
                        /* fall through */
                case BNX2_LINK_STATUS_100BASE_T4:
                case BNX2_LINK_STATUS_100FULL:
                        bp->line_speed = SPEED_100;
                        break;
                case BNX2_LINK_STATUS_1000HALF:
                        bp->duplex = DUPLEX_HALF;
                        /* fall through */
                case BNX2_LINK_STATUS_1000FULL:
                        bp->line_speed = SPEED_1000;
                        break;
                case BNX2_LINK_STATUS_2500HALF:
                        bp->duplex = DUPLEX_HALF;
                        /* fall through */
                case BNX2_LINK_STATUS_2500FULL:
                        bp->line_speed = SPEED_2500;
                        break;
                default:
                        bp->line_speed = 0;
                        break;
                }

                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);
        }

        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
        u32 evt_code;

        evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
        switch (evt_code) {
        case BNX2_FW_EVT_CODE_LINK_EVENT:
                bnx2_remote_phy_event(bp);
                break;
        case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
        default:
                bnx2_send_heart_beat(bp);
                break;
        }
        return 0;
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv = 0;
                u32 new_adv1000 = 0;

                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                            ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
                new_adv |= ADVERTISE_CSMA;
                new_adv |= bnx2_phy_get_pause_adv(bp);

                new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);

                if ((adv1000_reg != new_adv1000) ||
                    (adv_reg != new_adv) ||
                    ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                       BMCR_ANENABLE);
                } else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced
                         * or vice-versa.
                         */
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        if (bp->loopback == MAC_LOOPBACK)
                return 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
                return bnx2_setup_serdes_phy(bp, port);
        else
                return bnx2_setup_copper_phy(bp);
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        } else {
                u32 val;

                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        } else {
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}

static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 val;
        int rc = 0;

        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                goto setup_phy;

        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp, reset_phy);
        } else {
                rc = bnx2_init_copper_phy(bp, reset_phy);
        }

setup_phy:
        if (!rc)
                rc = bnx2_setup_phy(bp, bp->phy_port);

        return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
        u32 mac_mode;

        mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~BNX2_EMAC_MODE_PORT;
        mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
        BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}

static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
        struct net_device *dev = bp->dev;
        u32 mcp_p0, mcp_p1;

        netdev_err(dev, "<--- start MCP states dump --->\n");
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mcp_p0 = BNX2_MCP_STATE_P0;
                mcp_p1 = BNX2_MCP_STATE_P1;
        } else {
                mcp_p0 = BNX2_MCP_STATE_P0_5708;
                mcp_p1 = BNX2_MCP_STATE_P1_5708;
        }
        netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
                   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
        netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
                   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
                   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
                   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
        netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
                   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
                   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
                   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
        netdev_err(dev, "DEBUG: shmem states:\n");
        netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
                   bnx2_shmem_rd(bp, BNX2_DRV_MB),
                   bnx2_shmem_rd(bp, BNX2_FW_MB),
                   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
        pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
        netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
                   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
                   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
        pr_cont(" condition[%08x]\n",
                bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
        DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
        DP_SHMEM_LINE(bp, 0x3cc);
        DP_SHMEM_LINE(bp, 0x3dc);
        DP_SHMEM_LINE(bp, 0x3ec);
        netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
        netdev_err(dev, "<--- end MCP states dump --->\n");
}
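
/* Synchronous mailbox handshake with the bootcode.  A sequence number
 * is merged into the message so the ACK in BNX2_FW_MB can be matched;
 * the wait polls in 10 ms steps up to BNX2_FW_ACK_TIME_OUT_MS.  On a
 * timeout the firmware is told explicitly (FW_TIMEOUT) so it will not
 * act on the stale command later, and the MCP state is dumped unless
 * the caller asked for silence.
 */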
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
                if (!silent) {
                        pr_err("fw sync timeout, reset code = %x\n", msg_data);
                        bnx2_dump_mcp_state(bp);
                }

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
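
/* Point the 5709 context engine at the host-resident context pages.
 * After the MEM_INIT bit self-clears, each page's DMA address is
 * written into the host page table; the WRITE_REQ bit acts as a busy
 * flag and is polled (up to 10 tries, 5 us apart) per entry.
 */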
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BNX2_PAGE_BITS - 8) << 16;
        BNX2_WR(bp, BNX2_CTX_COMMAND, val);
        for (i = 0; i < 10; i++) {
                val = BNX2_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
                else
                        return -ENOMEM;

                BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                        (bp->ctx_blk_mapping[i] & 0xffffffff) |
                        BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                        (u64) bp->ctx_blk_mapping[i] >> 32);
                BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                        BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                for (j = 0; j < 10; j++) {
                        val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}

static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8)
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        else
                                new_vcid = vcid;
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                } else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
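
/* Work around bad RX buffer memory blocks: allocate every free mbuf
 * from the RBUF pool, keep the handles that test good, and free only
 * those back, which permanently strands the blocks flagged bad (bit 9
 * of the allocation cookie).
 */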
  2214. static int
  2215. bnx2_alloc_bad_rbuf(struct bnx2 *bp)
  2216. {
  2217. u16 *good_mbuf;
  2218. u32 good_mbuf_cnt;
  2219. u32 val;
  2220. good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
  2221. if (good_mbuf == NULL)
  2222. return -ENOMEM;
  2223. BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
  2224. BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
  2225. good_mbuf_cnt = 0;
  2226. /* Allocate a bunch of mbufs and save the good ones in an array. */
  2227. val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
  2228. while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
  2229. bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
  2230. BNX2_RBUF_COMMAND_ALLOC_REQ);
  2231. val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
  2232. val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
  2233. /* The addresses with Bit 9 set are bad memory blocks. */
  2234. if (!(val & (1 << 9))) {
  2235. good_mbuf[good_mbuf_cnt] = (u16) val;
  2236. good_mbuf_cnt++;
  2237. }
  2238. val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
  2239. }
  2240. /* Free the good ones back to the mbuf pool thus discarding
  2241. * all the bad ones. */
  2242. while (good_mbuf_cnt) {
  2243. good_mbuf_cnt--;
  2244. val = good_mbuf[good_mbuf_cnt];
  2245. val = (val << 9) | val | 1;
  2246. bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
  2247. }
  2248. kfree(good_mbuf);
  2249. return 0;
  2250. }

static void
bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
{
	u32 val;

	val = (mac_addr[0] << 8) | mac_addr[1];

	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}
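
/*
 * Worked example of the register packing above, with an illustrative
 * (made-up) address 00:10:18:ab:cd:ef at pos 0:
 *
 *	BNX2_EMAC_MAC_MATCH0 = (0x00 << 8) | 0x10 = 0x00000010
 *	BNX2_EMAC_MAC_MATCH1 = (0x18 << 24) | (0xab << 16) |
 *			       (0xcd << 8) | 0xef = 0x18abcdef
 *
 * Each pos selects the next MATCH0/MATCH1 register pair, 8 bytes apart.
 */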

static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}

static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
		       PAGE_SIZE, PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
}
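
/*
 * Hedged sketch of the RX buffer layout assumed below (the exact offsets
 * come from get_l2_fhdr() and BNX2_RX_OFFSET, not from this comment): the
 * chip DMAs a struct l2_fhdr status header immediately ahead of the
 * packet inside the same kmalloc() area, and enough tailroom is reserved
 * for skb_shared_info so the buffer can later be handed to build_skb()
 * without copying:
 *
 *	data: [ headroom | struct l2_fhdr | pad | packet ... | shinfo ]
 */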

static inline int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	u8 *data;
	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev,
				 get_l2_fhdr(data),
				 bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		kfree(data);
		return -EIO;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}

static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}

static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);
}

static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();

	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
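
/*
 * The last descriptor on each ring page is a "next page" pointer that
 * never completes a packet, so a hardware index landing exactly on it is
 * bumped past it. Worked example, assuming BNX2_MAX_TX_DESC_CNT == 255:
 * a raw consumer index of 0x1ff (low byte 0xff) is returned as 0x200,
 * matching the slot that BNX2_NEXT_TX_BD() also skips.
 */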

static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}

static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
					       [BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
					       [BNX2_RX_IDX(prod)];

		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}

static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
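
/*
 * Recycling sketch (illustrative, not additional driver logic): when a
 * packet cannot be passed up, its still-mapped buffer moves from the
 * consumer slot to the current producer slot so the hardware can reuse
 * it without a fresh kmalloc()/dma_map_single():
 *
 *	prod_rx_buf->data = cons buffer;	// reuse the data area
 *	prod_bd->haddr    = cons_bd->haddr;	// reuse the DMA address
 *	rx_prod_bseq     += rx_buf_use_size;	// byte credit for the chip
 */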

static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
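
/*
 * Split-frame assembly, worked through with illustrative numbers: for a
 * 9000-byte frame with hdr_len = 256 and 4096-byte pages, the first 256
 * bytes stay in the linear buffer, frag_size = 9000 + 4 - 256 = 8748
 * bytes arrive in ceil(8748 / 4096) = 3 page frags, and the last frag is
 * shortened by 4 bytes so the hardware-appended CRC never reaches the
 * stack.
 */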

static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();

	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
		cons++;
	return cons;
}

static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					   sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						   sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);
		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
		    (ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;
		}

		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
		    (status & (L2_FHDR_STATUS_TCP_SEGMENT |
			       L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;
}

/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}

static inline int
bnx2_has_fast_work(struct bnx2_napi *bnapi)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
		return 1;
	return 0;
}

#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
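
/*
 * Hedged note on the workaround below: some chips can occasionally lose
 * an MSI, leaving work pending with no further interrupt. This check
 * runs from the driver timer; if the status index has not moved since
 * the previous tick while work is still pending, the MSI is assumed
 * lost, the MSI enable bit is pulsed off and on, and the ISR is called
 * directly to restart NAPI.
 */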

static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}

#ifdef BCM_CNIC
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif

static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}

static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}

static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
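
/*
 * Interrupt re-arm sketch for the MSI-X path above (illustrative): each
 * vector acknowledges by writing its own int_num plus the last status
 * index it consumed,
 *
 *	int_num | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | last_status_idx
 *
 * so the chip raises a new interrupt for that vector only after the
 * status index advances past the acknowledged value.
 */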

static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}

/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	    (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}

static int
check_fw_section(const struct firmware *fw,
		 const struct bnx2_fw_file_section *section,
		 u32 alignment, bool non_empty)
{
	u32 offset = be32_to_cpu(section->offset);
	u32 len = be32_to_cpu(section->len);

	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
		return -EINVAL;
	if ((non_empty && len == 0) || len > fw->size - offset ||
	    len & (alignment - 1))
		return -EINVAL;
	return 0;
}

static int
check_mips_fw_entry(const struct firmware *fw,
		    const struct bnx2_mips_fw_file_entry *entry)
{
	if (check_fw_section(fw, &entry->text, 4, true) ||
	    check_fw_section(fw, &entry->data, 4, false) ||
	    check_fw_section(fw, &entry->rodata, 4, false))
		return -EINVAL;
	return 0;
}

static void bnx2_release_firmware(struct bnx2 *bp)
{
	if (bp->rv2p_firmware) {
		release_firmware(bp->mips_firmware);
		release_firmware(bp->rv2p_firmware);
		bp->rv2p_firmware = NULL;
	}
}
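
/*
 * Firmware selection, summarized from the code below: 5709 parts pair
 * FW_MIPS_FILE_09 with FW_RV2P_FILE_09_Ax on A0/A1 steppings or
 * FW_RV2P_FILE_09 otherwise; all other chips use the FW_MIPS_FILE_06 /
 * FW_RV2P_FILE_06 pair. Every section of both images is bounds-checked
 * before a single byte is written to the chip.
 */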

static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}

static int bnx2_request_firmware(struct bnx2 *bp)
{
	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
}

static u32
rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
{
	switch (idx) {
	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
		rv2p_code |= RV2P_BD_PAGE_SIZE;
		break;
	}
	return rv2p_code;
}
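
/*
 * RV2P instructions are 64 bits wide and are loaded as a HIGH/LOW word
 * pair followed by a write request at the target slot. A hedged sketch
 * of one slot write (mirroring, not extending, the loop below):
 *
 *	BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, hi32);
 *	BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, lo32);
 *	BNX2_WR(bp, addr, (i / 8) | cmd);	// i counts bytes, 8 per slot
 *
 * The fixup table then patches up to eight slots in place, e.g. to stamp
 * the host BD page size into the code.
 */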

static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	} else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}

static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
	    const struct bnx2_mips_fw_file_entry *fw_entry)
{
	u32 addr, len, file_offset;
	__be32 *data;
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	addr = be32_to_cpu(fw_entry->text.addr);
	len = be32_to_cpu(fw_entry->text.len);
	file_offset = be32_to_cpu(fw_entry->text.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Load the Data area. */
	addr = be32_to_cpu(fw_entry->data.addr);
	len = be32_to_cpu(fw_entry->data.len);
	file_offset = be32_to_cpu(fw_entry->data.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Load the Read-Only area. */
	addr = be32_to_cpu(fw_entry->rodata.addr);
	len = be32_to_cpu(fw_entry->rodata.len);
	file_offset = be32_to_cpu(fw_entry->rodata.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);

	val = be32_to_cpu(fw_entry->start_addr);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
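
/*
 * Address translation used by the loader above, with illustrative
 * numbers: each on-chip CPU sees its scratchpad at mips_view_base in its
 * own address space, while the host reaches the same memory through the
 * indirect window at spad_base; a section linked at
 * mips_view_base + 0x100 is therefore written at spad_base + 0x100.
 */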

static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}

static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = BNX2_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			BNX2_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
					0xffffffff);
			}
			BNX2_WR(bp, BNX2_EMAC_RX_MODE,
				BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
				BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
				BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
				BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
				BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = BNX2_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			BNX2_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			if (bp->wol)
				pmcsr |= 3;
		} else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_MISC_CFG);
	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		BNX2_WR(bp, BNX2_NVM_COMMAND,
			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_MISC_CFG);
	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
			BNX2_NVM_ACCESS_ENABLE_WR_EN));
}

static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
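
/*
 * Buffered-flash offset translation, worked through with illustrative
 * values (page_size = 264, page_bits = 9, as in the flash table): linear
 * offset 1000 falls in page 1000 / 264 = 3 at byte 1000 % 264 = 208, so
 * the device address becomes (3 << 9) + 208 = 1744. 5709 parts and
 * non-translated flashes skip this step.
 */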

static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {
		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	} else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}

static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		} else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
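
/*
 * Unaligned-write sketch with illustrative numbers: writing 5 bytes at
 * offset 6 reads back the dwords at offsets 4 and 8, merges the new
 * bytes into an 8-byte aligned staging buffer covering offsets 4..11,
 * and writes that buffer out page by page, erasing each non-buffered
 * flash page before it is rewritten. align_start/align_end below track
 * how many edge bytes came from the read-back.
 */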
  3698. static int
  3699. bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
  3700. int buf_size)
  3701. {
  3702. u32 written, offset32, len32;
  3703. u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
  3704. int rc = 0;
  3705. int align_start, align_end;
  3706. buf = data_buf;
  3707. offset32 = offset;
  3708. len32 = buf_size;
  3709. align_start = align_end = 0;
  3710. if ((align_start = (offset32 & 3))) {
  3711. offset32 &= ~3;
  3712. len32 += align_start;
  3713. if (len32 < 4)
  3714. len32 = 4;
  3715. if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
  3716. return rc;
  3717. }
  3718. if (len32 & 3) {
  3719. align_end = 4 - (len32 & 3);
  3720. len32 += align_end;
  3721. if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
  3722. return rc;
  3723. }
  3724. if (align_start || align_end) {
  3725. align_buf = kmalloc(len32, GFP_KERNEL);
  3726. if (align_buf == NULL)
  3727. return -ENOMEM;
  3728. if (align_start) {
  3729. memcpy(align_buf, start, 4);
  3730. }
  3731. if (align_end) {
  3732. memcpy(align_buf + len32 - 4, end, 4);
  3733. }
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
							   page_start + j,
							   &flash_buffer[j],
							   cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
			     (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
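/* Read the firmware capability mailbox in shared memory and acknowledge
 * the capabilities the driver supports (keep-VLAN, remote PHY).  On a
 * remote-PHY capable SerDes port this also selects phy_port from the
 * reported link status.
 */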
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
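/* Quiesce DMA, handshake with the bootcode, issue the core reset (via
 * BNX2_MISC_COMMAND on the 5709, via BNX2_PCICFG_MISC_CONFIG on the
 * 5706/5708), then verify byte swapping and wait for the firmware to
 * finish initializing before touching anything else.
 */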
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximately 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * value of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and writes from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
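/* Post-reset hardware bring-up: DMA byte/word swap configuration, on-chip
 * context memory, internal CPU firmware, MTU/rbuf sizing, and the host
 * coalescing (HC) block that controls status-block DMA and interrupt
 * moderation.
 */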
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	BNX2_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	BNX2_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BNX2_PAGE_BITS - 8) << 24;
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);

	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	BNX2_WR(bp, BNX2_HC_CONFIG, val);

	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		BNX2_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
			 bp->rx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
	udelay(20);

	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		txr = &bnapi->tx_ring;
		rxr = &bnapi->rx_ring;

		txr->tx_cons = 0;
		txr->hw_tx_cons = 0;
		rxr->rx_prod_bseq = 0;
		rxr->rx_prod = 0;
		rxr->rx_cons = 0;
		rxr->rx_pg_prod = 0;
		rxr->rx_pg_cons = 0;
	}
}
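/* Program the TX ring's L2 context in on-chip context memory; the 5709
 * uses a different set of context offsets (the _XI variants) than the
 * 5706/5708.
 */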
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct bnx2_tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
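/* The RX rings are chains of ring pages: every regular BD in a page
 * describes a buffer of buf_size, and the final BD of each page holds
 * the DMA address of the next page (wrapping back to page 0 at the end).
 */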
static void
bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
		     u32 buf_size, int num_rings)
{
	int i;
	struct bnx2_rx_bd *rxbd;

	for (i = 0; i < num_rings; i++) {
		int j;

		rxbd = &rx_ring[i][0];
		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = buf_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (num_rings - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
	}
}
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
	}
}
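/* Given a requested descriptor count, compute how many ring pages are
 * needed and round that up to the next power of 2, capped at max_size.
 */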
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 max, num_rings = 1;

	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
		ring_size -= BNX2_MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = max_size;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	return max;
}
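/* Size the RX buffers for the current MTU.  If a full frame plus skb
 * overhead no longer fits in one page (and jumbo placement works on this
 * chip), switch to the page ring: headers land in small regular-ring
 * buffers and the remainder of the frame is placed in page-sized buffers.
 */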
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
			  NET_SKB_PAD +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (data == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}

static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}

static int
bnx2_shutdown_chip(struct bnx2 *bp)
{
	u32 reset_code;

	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;

	return bnx2_reset_chip(bp, reset_code);
}
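/* Register self-test: for each table entry, write 0 and then 0xffffffff
 * and check that read/write bits (rw_mask) stick while read-only bits
 * (ro_mask) keep their saved value.  The original register value is
 * restored either way.
 */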
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
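/* Walk a region of internal chip memory with a set of test patterns
 * through the indirect register interface, reading each dword back to
 * verify that it stuck.
 */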
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {
			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);

			if (bnx2_reg_rd_ind(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32   offset;
		u32   len;
	} mem_tbl_5706[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
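/* Loopback self-test: build a packet addressed to ourselves, send it
 * through a single TX BD with either the MAC or the PHY looped back,
 * then verify the received l2_fhdr status, packet length, and payload.
 */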
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *) rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
				dma_unmap_addr(rx_buf, mapping),
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		 L2_FHDR_ERRORS_PHY_DECODE |
		 L2_FHDR_ERRORS_ALIGNMENT |
		 L2_FHDR_ERRORS_TOO_SHORT |
		 L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
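/* Each 0x100-byte NVRAM block checked below ends in its own CRC32, so
 * feeding the whole block to ether_crc_le() must produce the standard
 * CRC-32 residual value defined below.
 */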
#define NVRAM_SIZE		0x200
#define CRC32_RESIDUAL		0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	BNX2_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
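/* Periodic heartbeat: checks for missed one-shot MSI interrupts, sends
 * keep-alives to the firmware, refreshes the FW RX drop counter, works
 * around chips with broken statistics blocks, and runs the SerDes
 * link-state workarounds above.
 */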
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}

static void
__bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
}

static void
bnx2_free_irq(struct bnx2 *bp)
{
	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
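/* Program the GRC windows so the MSI-X table and PBA are visible, then
 * try to enable the requested number of MSI-X vectors, trimming the
 * count down on partial success (pci_enable_msix() returns the number
 * of vectors it could have allocated).
 */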
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;
#endif
	rc = -ENOSPC;
	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
		if (rc <= 0)
			break;
		if (rc > 0)
			total_vecs = rc;
	}

	if (rc != 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working.
		 * If the MSI test fails, go back to INTx mode.
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
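/* Debug dump used on TX timeout: prints every flow-through queue (FTQ)
 * control register, the internal CPU states, and the TBDC CAM contents.
 */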
#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }

static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
  5341. static void
  5342. bnx2_tx_timeout(struct net_device *dev)
  5343. {
  5344. struct bnx2 *bp = netdev_priv(dev);
  5345. bnx2_dump_ftq(bp);
  5346. bnx2_dump_state(bp);
  5347. bnx2_dump_mcp_state(bp);
  5348. /* This allows the netif to be shutdown gracefully before resetting */
  5349. schedule_work(&bp->reset_task);
  5350. }
  5351. /* Called with netif_tx_lock.
  5352. * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
  5353. * netif_wake_queue().
  5354. */
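
/* Note on the TX BD layout used below: tx_bd_vlan_tag_flags carries the
 * per-BD flag bits in its low 16 bits and the 802.1Q tag in its high 16
 * bits; tx_bd_mss_nbytes packs the fragment length in the low 16 bits
 * and, for LSO packets, the MSS in the high 16 bits.
 */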
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	prod = BNX2_NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();

		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;

dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
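
/* The hardware clears its statistics block on chip reset, so running
 * totals are folded into temp_stats_blk first.  For the 64-bit counters
 * the low 32-bit halves are summed in a u64 so the carry can be
 * propagated by hand: e.g. lo 0xfffffffe + 0x00000004 yields
 * 0x1_00000002, which stores lo 0x00000002 and bumps hi by one.
 */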
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}
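
/* Each 64-bit hardware counter is exposed as a _hi/_lo register pair;
 * the GET_64BIT macros stitch the pair back together and add in the
 * totals saved in temp_stats_blk across resets.
 */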
#define GET_64BIT_NET_STATS64(ctr)				\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)

static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, bp->line_speed);
		cmd->duplex = bp->duplex;
	}
	else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);

		if (cmd->port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}

#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
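
/* reg_boundaries[] below lists [start, end) pairs of register ranges
 * that are safe to read; the gaps between pairs are left zero-filled
 * in the dump buffer (see the memset) rather than read from the chip.
 */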
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & BNX2_FLAG_NO_WOL) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & BNX2_FLAG_NO_WOL)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;
	return 0;
}
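
/* Coalescing parameters are clamped to what the host-coalescing
 * register fields can presumably hold: tick values to 10 bits (0x3ff)
 * and frame-count trip points to 8 bits (0xff).
 */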
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
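
/* Ring resizing requires a full teardown: stats are saved first (a chip
 * reset clears them), then IRQs, SKBs and DMA memory are released
 * before everything is reallocated at the new sizes and the NIC is
 * reinitialized.
 */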
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
				   false);
	return rc;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInFTQDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
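
/* In the stats_len arrays above, 8 marks a 64-bit counter (a hi/lo
 * register pair), 4 a 32-bit counter, and 0 a counter that is skipped
 * (reported as zero) because of the errata noted above; see
 * bnx2_get_ethtool_stats() below for how the widths are consumed.
 */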
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
		       sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
		       sizeof(bnx2_tests_str_arr));
		break;
	}
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		       struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}

static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		bnx2_set_power_state(bp, PCI_D0);

		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);

		if (!netif_running(dev))
			bnx2_set_power_state(bp, PCI_D3hot);
		break;
	}

	return 0;
}
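
/* On chips without BNX2_FLAG_CAN_KEEP_VLAN the hardware presumably
 * always strips the VLAN tag on receive, so RX VLAN acceleration is
 * forced on here rather than left to user control.
 */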
static netdev_features_t
bnx2_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		features |= NETIF_F_HW_VLAN_RX;

	return features;
}

static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}

static void bnx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}

	channels->max_rx = max_rx_rings;
	channels->max_tx = max_tx_rings;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = bp->num_rx_rings;
	channels->tx_count = bp->num_tx_rings;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int bnx2_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;
	int rc = 0;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}
	if (channels->rx_count > max_rx_rings ||
	    channels->tx_count > max_tx_rings)
		return -EINVAL;

	bp->num_req_rx_rings = channels->rx_count;
	bp->num_req_tx_rings = channels->tx_count;

	if (netif_running(dev))
		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
					   bp->tx_ring_size, true);

	return rc;
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
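
/* The 5709 bond ID distinguishes fixed copper (bond ID C) and fixed
 * SerDes (bond ID S) parts; for dual-media parts the media is taken
 * from the PHY strap value, decoded per PCI function below.
 */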
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}

static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
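
/* The VPD image lives at NVRAM offset 0x300; it is read into the upper
 * half of the scratch buffer and byte-swapped one dword at a time
 * (NVRAM reads apparently return big-endian words), then the read-only
 * section is searched for the Dell version string ("1028" is Dell's
 * PCI vendor ID).
 */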
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
static int
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;
	int err;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
							 TX_MAX_TSS_RINGS + 1));
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bnx2_set_power_state(bp, PCI_D0);

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (!pci_is_pcie(pdev)) {
			dev_err(&pdev->dev, "Not PCIE, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}

		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;

		/* AER (Advanced Error Reporting) hooks */
		err = pci_enable_pcie_error_reporting(pdev);
		if (!err)
			bp->flags |= BNX2_FLAG_AER_ENABLED;

	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = BNX2_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		BNX2_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;
		goto err_out_unmap;
	}
  6739. bnx2_init_nvram(bp);
  6740. reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
  6741. if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
  6742. bp->func = 1;
  6743. if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
  6744. BNX2_SHM_HDR_SIGNATURE_SIG) {
  6745. u32 off = bp->func << 2;
  6746. bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
  6747. } else
  6748. bp->shmem_base = HOST_VIEW_SHMEM_BASE;
  6749. /* Get the permanent MAC address. First we need to make sure the
  6750. * firmware is actually running.
  6751. */
  6752. reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
  6753. if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
  6754. BNX2_DEV_INFO_SIGNATURE_MAGIC) {
  6755. dev_err(&pdev->dev, "Firmware not running, aborting\n");
  6756. rc = -ENODEV;
  6757. goto err_out_unmap;
  6758. }
  6759. bnx2_read_vpd_fw_ver(bp);
  6760. j = strlen(bp->fw_version);
  6761. reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
  6762. for (i = 0; i < 3 && j < 24; i++) {
  6763. u8 num, k, skip0;
  6764. if (i == 0) {
  6765. bp->fw_version[j++] = 'b';
  6766. bp->fw_version[j++] = 'c';
  6767. bp->fw_version[j++] = ' ';
  6768. }
  6769. num = (u8) (reg >> (24 - (i * 8)));
  6770. for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
  6771. if (num >= k || !skip0 || k == 1) {
  6772. bp->fw_version[j++] = (num / k) + '0';
  6773. skip0 = 0;
  6774. }
  6775. }
  6776. if (i != 2)
  6777. bp->fw_version[j++] = '.';
  6778. }
  6779. reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
  6780. if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
  6781. bp->wol = 1;
  6782. if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
  6783. bp->flags |= BNX2_FLAG_ASF_ENABLE;
  6784. for (i = 0; i < 30; i++) {
  6785. reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
  6786. if (reg & BNX2_CONDITION_MFW_RUN_MASK)
  6787. break;
  6788. msleep(10);
  6789. }
  6790. }
  6791. reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
  6792. reg &= BNX2_CONDITION_MFW_RUN_MASK;
  6793. if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
  6794. reg != BNX2_CONDITION_MFW_RUN_NONE) {
  6795. u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
  6796. if (j < 32)
  6797. bp->fw_version[j++] = ' ';
  6798. for (i = 0; i < 3 && j < 28; i++) {
  6799. reg = bnx2_reg_rd_ind(bp, addr + i * 4);
  6800. reg = be32_to_cpu(reg);
  6801. memcpy(&bp->fw_version[j], &reg, 4);
  6802. j += 4;
  6803. }
  6804. }
  6805. reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
  6806. bp->mac_addr[0] = (u8) (reg >> 8);
  6807. bp->mac_addr[1] = (u8) reg;
  6808. reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
  6809. bp->mac_addr[2] = (u8) (reg >> 24);
  6810. bp->mac_addr[3] = (u8) (reg >> 16);
  6811. bp->mac_addr[4] = (u8) (reg >> 8);
  6812. bp->mac_addr[5] = (u8) reg;
  6813. bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
  6814. bnx2_set_rx_ring_size(bp, 255);
  6815. bp->tx_quick_cons_trip_int = 2;
  6816. bp->tx_quick_cons_trip = 20;
  6817. bp->tx_ticks_int = 18;
  6818. bp->tx_ticks = 80;
  6819. bp->rx_quick_cons_trip_int = 2;
  6820. bp->rx_quick_cons_trip = 12;
  6821. bp->rx_ticks_int = 18;
  6822. bp->rx_ticks = 18;
  6823. bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
  6824. bp->current_interval = BNX2_TIMER_INTERVAL;
  6825. bp->phy_addr = 1;
  6826. /* Disable WOL support if we are running on a SERDES chip. */
  6827. if (CHIP_NUM(bp) == CHIP_NUM_5709)
  6828. bnx2_get_5709_media(bp);
  6829. else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
  6830. bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
  6831. bp->phy_port = PORT_TP;
  6832. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  6833. bp->phy_port = PORT_FIBRE;
  6834. reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
  6835. if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
  6836. bp->flags |= BNX2_FLAG_NO_WOL;
  6837. bp->wol = 0;
  6838. }
  6839. if (CHIP_NUM(bp) == CHIP_NUM_5706) {
  6840. /* Don't do parallel detect on this board because of
  6841. * some board problems. The link will not go down
  6842. * if we do parallel detect.
  6843. */
  6844. if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
  6845. pdev->subsystem_device == 0x310c)
  6846. bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
  6847. } else {
  6848. bp->phy_addr = 2;
  6849. if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
  6850. bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
  6851. }
  6852. } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
  6853. CHIP_NUM(bp) == CHIP_NUM_5708)
  6854. bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
  6855. else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
  6856. (CHIP_REV(bp) == CHIP_REV_Ax ||
  6857. CHIP_REV(bp) == CHIP_REV_Bx))
  6858. bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
  6859. bnx2_init_fw_cap(bp);
  6860. if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
  6861. (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
  6862. (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
  6863. !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
  6864. bp->flags |= BNX2_FLAG_NO_WOL;
  6865. bp->wol = 0;
  6866. }
  6867. if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
  6868. bp->tx_quick_cons_trip_int =
  6869. bp->tx_quick_cons_trip;
  6870. bp->tx_ticks_int = bp->tx_ticks;
  6871. bp->rx_quick_cons_trip_int =
  6872. bp->rx_quick_cons_trip;
  6873. bp->rx_ticks_int = bp->rx_ticks;
  6874. bp->comp_prod_trip_int = bp->comp_prod_trip;
  6875. bp->com_ticks_int = bp->com_ticks;
  6876. bp->cmd_ticks_int = bp->cmd_ticks;
  6877. }
    /* Disable MSI on 5706 if AMD 8132 bridge is found.
     *
     * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI
     * writes with byte enables disabled on the unused 32-bit word.
     * This is legal but causes problems on the AMD 8132 which will
     * eventually stop responding after a while.
     *
     * AMD believes this incompatibility is unique to the 5706, and
     * prefers to locally disable MSI rather than globally disabling it.
     */
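    /* Note: pci_get_device() takes a reference on the device it
     * returns (and drops the reference on the "from" argument), so an
     * early break out of the loop must balance it with pci_dev_put().
     */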
    if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
        struct pci_dev *amd_8132 = NULL;

        while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
                                          PCI_DEVICE_ID_AMD_8132_BRIDGE,
                                          amd_8132))) {

            if (amd_8132->revision >= 0x10 &&
                amd_8132->revision <= 0x13) {
                disable_msi = 1;
                pci_dev_put(amd_8132);
                break;
            }
        }
    }
    bnx2_set_default_link(bp);
    bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

    init_timer(&bp->timer);
    bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
    bp->timer.data = (unsigned long) bp;
    bp->timer.function = bnx2_timer;
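
    /* If the bootcode advertises iSCSI initiator support, export the
     * maximum iSCSI connection count to the CNIC offload driver.
     */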
#ifdef BCM_CNIC
    if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
        bp->cnic_eth_dev.max_iscsi_conn =
            (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
             BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
#endif
    pci_save_state(pdev);

    return 0;
err_out_unmap:
    if (bp->flags & BNX2_FLAG_AER_ENABLED) {
        pci_disable_pcie_error_reporting(pdev);
        bp->flags &= ~BNX2_FLAG_AER_ENABLED;
    }

    pci_iounmap(pdev, bp->regview);
    bp->regview = NULL;

err_out_release:
    pci_release_regions(pdev);

err_out_disable:
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);

err_out:
    return rc;
}
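
/* Format a human-readable bus description into the caller's buffer,
 * e.g. "PCI Express" or "PCI-X 64-bit 133MHz" (illustrative outputs;
 * the speed comes from bp->bus_speed_mhz).
 */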
static char *
bnx2_bus_string(struct bnx2 *bp, char *str)
{
    char *s = str;

    if (bp->flags & BNX2_FLAG_PCIE) {
        s += sprintf(s, "PCI Express");
    } else {
        s += sprintf(s, "PCI");
        if (bp->flags & BNX2_FLAG_PCIX)
            s += sprintf(s, "-X");
        if (bp->flags & BNX2_FLAG_PCI_32BIT)
            s += sprintf(s, " 32-bit");
        else
            s += sprintf(s, " 64-bit");
        s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
    }
    return str;
}
static void
bnx2_del_napi(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->irq_nvecs; i++)
        netif_napi_del(&bp->bnx2_napi[i].napi);
}
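
/* Register one NAPI context per IRQ vector: vector 0 polls the default
 * ring via bnx2_poll(), any additional MSI-X vectors use
 * bnx2_poll_msix().  64 is the NAPI poll weight (per-poll budget).
 */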
static void
bnx2_init_napi(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->irq_nvecs; i++) {
        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
        int (*poll)(struct napi_struct *, int);

        if (i == 0)
            poll = bnx2_poll;
        else
            poll = bnx2_poll_msix;

        netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
        bnapi->bp = bp;
    }
}
static const struct net_device_ops bnx2_netdev_ops = {
    .ndo_open             = bnx2_open,
    .ndo_start_xmit       = bnx2_start_xmit,
    .ndo_stop             = bnx2_close,
    .ndo_get_stats64      = bnx2_get_stats64,
    .ndo_set_rx_mode      = bnx2_set_rx_mode,
    .ndo_do_ioctl         = bnx2_ioctl,
    .ndo_validate_addr    = eth_validate_addr,
    .ndo_set_mac_address  = bnx2_change_mac_addr,
    .ndo_change_mtu       = bnx2_change_mtu,
    .ndo_fix_features     = bnx2_fix_features,
    .ndo_set_features     = bnx2_set_features,
    .ndo_tx_timeout       = bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller  = poll_bnx2,
#endif
};
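
/* Probe entry point: allocate the net_device, initialize the board,
 * advertise offload features, and register with the network stack.
 */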
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    static int version_printed = 0;
    struct net_device *dev;
    struct bnx2 *bp;
    int rc;
    char str[40];

    if (version_printed++ == 0)
        pr_info("%s", version);

    /* dev zeroed in init_etherdev */
    dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
    if (!dev)
        return -ENOMEM;

    rc = bnx2_init_board(pdev, dev);
    if (rc < 0)
        goto err_free;

    dev->netdev_ops = &bnx2_netdev_ops;
    dev->watchdog_timeo = TX_TIMEOUT;
    dev->ethtool_ops = &bnx2_ethtool_ops;

    bp = netdev_priv(dev);

    pci_set_drvdata(pdev, dev);

    memcpy(dev->dev_addr, bp->mac_addr, 6);
    memcpy(dev->perm_addr, bp->mac_addr, 6);

    dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                       NETIF_F_TSO | NETIF_F_TSO_ECN |
                       NETIF_F_RXHASH | NETIF_F_RXCSUM;

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

    dev->vlan_features = dev->hw_features;
    dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
    dev->features |= dev->hw_features;
    dev->priv_flags |= IFF_UNICAST_FLT;

    if ((rc = register_netdev(dev))) {
        dev_err(&pdev->dev, "Cannot register net device\n");
        goto error;
    }

    netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
                "node addr %pM\n", board_info[ent->driver_data].name,
                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                ((CHIP_ID(bp) & 0x0ff0) >> 4),
                bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
                pdev->irq, dev->dev_addr);

    return 0;

error:
    pci_iounmap(pdev, bp->regview);
    pci_release_regions(pdev);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
err_free:
    free_netdev(dev);
    return rc;
}
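
/* Teardown mirrors probe in reverse: unregister from the stack, stop
 * deferred work and the timer, then release MMIO, firmware, and PCI
 * resources.
 */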
static void
bnx2_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct bnx2 *bp = netdev_priv(dev);

    unregister_netdev(dev);

    del_timer_sync(&bp->timer);
    cancel_work_sync(&bp->reset_task);

    pci_iounmap(bp->pdev, bp->regview);

    kfree(bp->temp_stats_blk);

    if (bp->flags & BNX2_FLAG_AER_ENABLED) {
        pci_disable_pcie_error_reporting(pdev);
        bp->flags &= ~BNX2_FLAG_AER_ENABLED;
    }

    bnx2_release_firmware(bp);

    free_netdev(dev);

    pci_release_regions(pdev);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct bnx2 *bp = netdev_priv(dev);

    /* PCI register 4 needs to be saved whether netif_running() or not.
     * MSI address and data need to be saved if using MSI and
     * netif_running().
     */
    pci_save_state(pdev);
    if (!netif_running(dev))
        return 0;

    cancel_work_sync(&bp->reset_task);
    bnx2_netif_stop(bp, true);
    netif_device_detach(dev);
    del_timer_sync(&bp->timer);
    bnx2_shutdown_chip(bp);
    bnx2_free_skbs(bp);
    bnx2_set_power_state(bp, pci_choose_state(pdev, state));
    return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct bnx2 *bp = netdev_priv(dev);

    pci_restore_state(pdev);
    if (!netif_running(dev))
        return 0;

    bnx2_set_power_state(bp, PCI_D0);
    netif_device_attach(dev);
    bnx2_init_nic(bp, 1);
    bnx2_netif_start(bp, true);
    return 0;
}
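
/* PCI AER recovery sequence: the PCI core invokes error_detected()
 * when a bus error is seen, slot_reset() after the link/slot has been
 * reset, and resume() once traffic may restart.
 */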
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct bnx2 *bp = netdev_priv(dev);

    rtnl_lock();
    netif_device_detach(dev);

    if (state == pci_channel_io_perm_failure) {
        rtnl_unlock();
        return PCI_ERS_RESULT_DISCONNECT;
    }

    if (netif_running(dev)) {
        bnx2_netif_stop(bp, true);
        del_timer_sync(&bp->timer);
        bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
    }

    pci_disable_device(pdev);
    rtnl_unlock();

    /* Request a slot reset. */
    return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct bnx2 *bp = netdev_priv(dev);
    pci_ers_result_t result;
    int err;

    rtnl_lock();
    if (pci_enable_device(pdev)) {
        dev_err(&pdev->dev,
                "Cannot re-enable PCI device after reset\n");
        result = PCI_ERS_RESULT_DISCONNECT;
    } else {
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (netif_running(dev)) {
            bnx2_set_power_state(bp, PCI_D0);
            bnx2_init_nic(bp, 1);
        }
        result = PCI_ERS_RESULT_RECOVERED;
    }
    rtnl_unlock();

    if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
        return result;

    err = pci_cleanup_aer_uncorrect_error_status(pdev);
    if (err) {
        dev_err(&pdev->dev,
                "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
                err); /* non-fatal, continue */
    }

    return result;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct bnx2 *bp = netdev_priv(dev);

    rtnl_lock();
    if (netif_running(dev))
        bnx2_netif_start(bp, true);

    netif_device_attach(dev);
    rtnl_unlock();
}
static const struct pci_error_handlers bnx2_err_handler = {
    .error_detected  = bnx2_io_error_detected,
    .slot_reset      = bnx2_io_slot_reset,
    .resume          = bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
    .name         = DRV_MODULE_NAME,
    .id_table     = bnx2_pci_tbl,
    .probe        = bnx2_init_one,
    .remove       = bnx2_remove_one,
    .suspend      = bnx2_suspend,
    .resume       = bnx2_resume,
    .err_handler  = &bnx2_err_handler,
};
static int __init bnx2_init(void)
{
    return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
    pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);