/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
					u32 addr, size_t size, u32 val)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}
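
/*
 * The storm_memset_*() helpers below program per-function and per-client
 * data structures kept in the internal memories of the X/T/U/CSTORM
 * processors, using plain register writes into the STORM internal memory
 * windows.
 */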
static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);
	u32 addr = BAR_USTRORM_INTMEM +
		   USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);
	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);
	u32 addr = BAR_XSTRORM_INTMEM +
		   XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
		   XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);
	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);
	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);
	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);
	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}
static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		   XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		   USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

	REG_WR16(bp, addr, eq_prod);
}
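
/*
 * Per-index host coalescing control: every status block index carries a
 * timeout value and an enable flag inside the CSTORM status block data.
 * The two helpers below rewrite just those fields for a single index.
 */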
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		   index_offset +
		   sizeof(struct hc_index_data)*sb_index +
		   offsetof(struct hc_index_data, timeout);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		   index_offset +
		   sizeof(struct hc_index_data)*sb_index +
		   offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
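
/*
 * DMAE (DMA engine) support: debug printing of a command, posting a
 * command into the engine's command memory, and construction of the
 * opcode word.
 */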
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}
  418. const u32 dmae_reg_go_c[] = {
  419. DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
  420. DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
  421. DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
  422. DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
  423. };
  424. /* copy command into DMAE command memory and set DMAE command go */
  425. void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
  426. {
  427. u32 cmd_offset;
  428. int i;
  429. cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
  430. for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
  431. REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
  432. DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
  433. idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
  434. }
  435. REG_WR(bp, dmae_reg_go_c[idx], 1);
  436. }
  437. u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
  438. {
  439. return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
  440. DMAE_CMD_C_ENABLE);
  441. }
  442. u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
  443. {
  444. return opcode & ~DMAE_CMD_SRC_RESET;
  445. }
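/* build a DMAE opcode from the source/destination types, port, VN,
 * endianness and, optionally, the requested completion type
 */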
  446. u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
  447. bool with_comp, u8 comp_type)
  448. {
  449. u32 opcode = 0;
  450. opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
  451. (dst_type << DMAE_COMMAND_DST_SHIFT));
  452. opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
  453. opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
  454. opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
  455. (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
  456. opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
  457. #ifdef __BIG_ENDIAN
  458. opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
  459. #else
  460. opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
  461. #endif
  462. if (with_comp)
  463. opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
  464. return opcode;
  465. }
  466. static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
  467. struct dmae_command *dmae,
  468. u8 src_type, u8 dst_type)
  469. {
  470. memset(dmae, 0, sizeof(struct dmae_command));
  471. /* set the opcode */
  472. dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
  473. true, DMAE_COMP_PCI);
  474. /* fill in the completion parameters */
  475. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
  476. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
  477. dmae->comp_val = DMAE_COMP_VAL;
  478. }
479. /* issue a DMAE command over the init channel and wait for completion */
  480. static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
  481. struct dmae_command *dmae)
  482. {
  483. u32 *wb_comp = bnx2x_sp(bp, wb_comp);
  484. int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
  485. int rc = 0;
  486. DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
  487. bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
  488. bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
  489. /* lock the dmae channel */
  490. spin_lock_bh(&bp->dmae_lock);
  491. /* reset completion */
  492. *wb_comp = 0;
  493. /* post the command on the channel used for initializations */
  494. bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
  495. /* wait for completion */
  496. udelay(5);
  497. while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
  498. DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
  499. if (!cnt) {
  500. BNX2X_ERR("DMAE timeout!\n");
  501. rc = DMAE_TIMEOUT;
  502. goto unlock;
  503. }
  504. cnt--;
  505. udelay(50);
  506. }
  507. if (*wb_comp & DMAE_PCI_ERR_FLAG) {
  508. BNX2X_ERR("DMAE PCI error!\n");
  509. rc = DMAE_PCI_ERROR;
  510. }
  511. DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
  512. bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
  513. bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
  514. unlock:
  515. spin_unlock_bh(&bp->dmae_lock);
  516. return rc;
  517. }
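/* DMA len32 dwords from host memory (dma_addr) to GRC address dst_addr;
 * while DMAE is not ready the data is written indirectly from the slowpath
 * wb_data buffer instead
 */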
  518. void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
  519. u32 len32)
  520. {
  521. struct dmae_command dmae;
  522. if (!bp->dmae_ready) {
  523. u32 *data = bnx2x_sp(bp, wb_data[0]);
  524. DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
  525. " using indirect\n", dst_addr, len32);
  526. bnx2x_init_ind_wr(bp, dst_addr, data, len32);
  527. return;
  528. }
  529. /* set opcode and fixed command fields */
  530. bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
  531. /* fill in addresses and len */
  532. dmae.src_addr_lo = U64_LO(dma_addr);
  533. dmae.src_addr_hi = U64_HI(dma_addr);
  534. dmae.dst_addr_lo = dst_addr >> 2;
  535. dmae.dst_addr_hi = 0;
  536. dmae.len = len32;
  537. bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
  538. /* issue the command and wait for completion */
  539. bnx2x_issue_dmae_with_comp(bp, &dmae);
  540. }
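/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer; uses indirect register reads while DMAE is not ready
 */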
  541. void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
  542. {
  543. struct dmae_command dmae;
  544. if (!bp->dmae_ready) {
  545. u32 *data = bnx2x_sp(bp, wb_data[0]);
  546. int i;
  547. DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
  548. " using indirect\n", src_addr, len32);
  549. for (i = 0; i < len32; i++)
  550. data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
  551. return;
  552. }
  553. /* set opcode and fixed command fields */
  554. bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
  555. /* fill in addresses and len */
  556. dmae.src_addr_lo = src_addr >> 2;
  557. dmae.src_addr_hi = 0;
  558. dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
  559. dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
  560. dmae.len = len32;
  561. bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
  562. /* issue the command and wait for completion */
  563. bnx2x_issue_dmae_with_comp(bp, &dmae);
  564. }
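/* write a long buffer via DMAE by splitting it into chunks of at most
 * DMAE_LEN32_WR_MAX dwords
 */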
  565. static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
  566. u32 addr, u32 len)
  567. {
  568. int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
  569. int offset = 0;
  570. while (len > dmae_wr_max) {
  571. bnx2x_write_dmae(bp, phys_addr + offset,
  572. addr + offset, dmae_wr_max);
  573. offset += dmae_wr_max * 4;
  574. len -= dmae_wr_max;
  575. }
  576. bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
  577. }
  578. /* used only for slowpath so not inlined */
  579. static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
  580. {
  581. u32 wb_write[2];
  582. wb_write[0] = val_hi;
  583. wb_write[1] = val_lo;
  584. REG_WR_DMAE(bp, reg, wb_write, 2);
  585. }
  586. #ifdef USE_WB_RD
  587. static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
  588. {
  589. u32 wb_data[2];
  590. REG_RD_DMAE(bp, reg, wb_data, 2);
  591. return HILO_U64(wb_data[0], wb_data[1]);
  592. }
  593. #endif
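/* scan the XSTORM/TSTORM/CSTORM/USTORM assert lists and print every valid
 * entry; returns the number of asserts found
 */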
  594. static int bnx2x_mc_assert(struct bnx2x *bp)
  595. {
  596. char last_idx;
  597. int i, rc = 0;
  598. u32 row0, row1, row2, row3;
  599. /* XSTORM */
  600. last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
  601. XSTORM_ASSERT_LIST_INDEX_OFFSET);
  602. if (last_idx)
  603. BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  604. /* print the asserts */
  605. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  606. row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  607. XSTORM_ASSERT_LIST_OFFSET(i));
  608. row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  609. XSTORM_ASSERT_LIST_OFFSET(i) + 4);
  610. row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  611. XSTORM_ASSERT_LIST_OFFSET(i) + 8);
  612. row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  613. XSTORM_ASSERT_LIST_OFFSET(i) + 12);
  614. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  615. BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  616. " 0x%08x 0x%08x 0x%08x\n",
  617. i, row3, row2, row1, row0);
  618. rc++;
  619. } else {
  620. break;
  621. }
  622. }
  623. /* TSTORM */
  624. last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
  625. TSTORM_ASSERT_LIST_INDEX_OFFSET);
  626. if (last_idx)
  627. BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  628. /* print the asserts */
  629. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  630. row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  631. TSTORM_ASSERT_LIST_OFFSET(i));
  632. row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  633. TSTORM_ASSERT_LIST_OFFSET(i) + 4);
  634. row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  635. TSTORM_ASSERT_LIST_OFFSET(i) + 8);
  636. row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  637. TSTORM_ASSERT_LIST_OFFSET(i) + 12);
  638. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  639. BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  640. " 0x%08x 0x%08x 0x%08x\n",
  641. i, row3, row2, row1, row0);
  642. rc++;
  643. } else {
  644. break;
  645. }
  646. }
  647. /* CSTORM */
  648. last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
  649. CSTORM_ASSERT_LIST_INDEX_OFFSET);
  650. if (last_idx)
  651. BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  652. /* print the asserts */
  653. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  654. row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  655. CSTORM_ASSERT_LIST_OFFSET(i));
  656. row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  657. CSTORM_ASSERT_LIST_OFFSET(i) + 4);
  658. row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  659. CSTORM_ASSERT_LIST_OFFSET(i) + 8);
  660. row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  661. CSTORM_ASSERT_LIST_OFFSET(i) + 12);
  662. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  663. BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  664. " 0x%08x 0x%08x 0x%08x\n",
  665. i, row3, row2, row1, row0);
  666. rc++;
  667. } else {
  668. break;
  669. }
  670. }
  671. /* USTORM */
  672. last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
  673. USTORM_ASSERT_LIST_INDEX_OFFSET);
  674. if (last_idx)
  675. BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  676. /* print the asserts */
  677. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  678. row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
  679. USTORM_ASSERT_LIST_OFFSET(i));
  680. row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
  681. USTORM_ASSERT_LIST_OFFSET(i) + 4);
  682. row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
  683. USTORM_ASSERT_LIST_OFFSET(i) + 8);
  684. row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
  685. USTORM_ASSERT_LIST_OFFSET(i) + 12);
  686. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  687. BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
  688. " 0x%08x 0x%08x 0x%08x\n",
  689. i, row3, row2, row1, row0);
  690. rc++;
  691. } else {
  692. break;
  693. }
  694. }
  695. return rc;
  696. }
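/* dump the MCP trace buffer (located via shmem) to the kernel log;
 * does nothing if there is no MCP
 */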
  697. static void bnx2x_fw_dump(struct bnx2x *bp)
  698. {
  699. u32 addr;
  700. u32 mark, offset;
  701. __be32 data[9];
  702. int word;
  703. u32 trace_shmem_base;
  704. if (BP_NOMCP(bp)) {
  705. BNX2X_ERR("NO MCP - can not dump\n");
  706. return;
  707. }
  708. if (BP_PATH(bp) == 0)
  709. trace_shmem_base = bp->common.shmem_base;
  710. else
  711. trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
  712. addr = trace_shmem_base - 0x0800 + 4;
  713. mark = REG_RD(bp, addr);
  714. mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
  715. + ((mark + 0x3) & ~0x3) - 0x08000000;
  716. pr_err("begin fw dump (mark 0x%x)\n", mark);
  717. pr_err("");
  718. for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
  719. for (word = 0; word < 8; word++)
  720. data[word] = htonl(REG_RD(bp, offset + 4*word));
  721. data[8] = 0x0;
  722. pr_cont("%s", (char *)data);
  723. }
  724. for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
  725. for (word = 0; word < 8; word++)
  726. data[word] = htonl(REG_RD(bp, offset + 4*word));
  727. data[8] = 0x0;
  728. pr_cont("%s", (char *)data);
  729. }
  730. pr_err("end of fw dump\n");
  731. }
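/* dump driver and firmware state (status blocks, rings, storm asserts and
 * the fw trace) to the log for post-mortem debugging
 */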
  732. void bnx2x_panic_dump(struct bnx2x *bp)
  733. {
  734. int i;
  735. u16 j;
  736. struct hc_sp_status_block_data sp_sb_data;
  737. int func = BP_FUNC(bp);
  738. #ifdef BNX2X_STOP_ON_ERROR
  739. u16 start = 0, end = 0;
  740. #endif
  741. bp->stats_state = STATS_STATE_DISABLED;
  742. DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
  743. BNX2X_ERR("begin crash dump -----------------\n");
  744. /* Indices */
  745. /* Common */
  746. BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
  747. " spq_prod_idx(0x%x)\n",
  748. bp->def_idx, bp->def_att_idx,
  749. bp->attn_state, bp->spq_prod_idx);
  750. BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
  751. bp->def_status_blk->atten_status_block.attn_bits,
  752. bp->def_status_blk->atten_status_block.attn_bits_ack,
  753. bp->def_status_blk->atten_status_block.status_block_id,
  754. bp->def_status_blk->atten_status_block.attn_bits_index);
  755. BNX2X_ERR(" def (");
  756. for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
  757. pr_cont("0x%x%s",
  758. bp->def_status_blk->sp_sb.index_values[i],
  759. (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
  760. for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
  761. *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
  762. CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
  763. i*sizeof(u32));
  764. pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
  765. "pf_id(0x%x) vnic_id(0x%x) "
  766. "vf_id(0x%x) vf_valid (0x%x)\n",
  767. sp_sb_data.igu_sb_id,
  768. sp_sb_data.igu_seg_id,
  769. sp_sb_data.p_func.pf_id,
  770. sp_sb_data.p_func.vnic_id,
  771. sp_sb_data.p_func.vf_id,
  772. sp_sb_data.p_func.vf_valid);
  773. for_each_eth_queue(bp, i) {
  774. struct bnx2x_fastpath *fp = &bp->fp[i];
  775. int loop;
  776. struct hc_status_block_data_e2 sb_data_e2;
  777. struct hc_status_block_data_e1x sb_data_e1x;
  778. struct hc_status_block_sm *hc_sm_p =
  779. CHIP_IS_E2(bp) ?
  780. sb_data_e2.common.state_machine :
  781. sb_data_e1x.common.state_machine;
  782. struct hc_index_data *hc_index_p =
  783. CHIP_IS_E2(bp) ?
  784. sb_data_e2.index_data :
  785. sb_data_e1x.index_data;
  786. int data_size;
  787. u32 *sb_data_p;
  788. /* Rx */
  789. BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
  790. " rx_comp_prod(0x%x)"
  791. " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
  792. i, fp->rx_bd_prod, fp->rx_bd_cons,
  793. fp->rx_comp_prod,
  794. fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
  795. BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
  796. " fp_hc_idx(0x%x)\n",
  797. fp->rx_sge_prod, fp->last_max_sge,
  798. le16_to_cpu(fp->fp_hc_idx));
  799. /* Tx */
  800. BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
  801. " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
  802. " *tx_cons_sb(0x%x)\n",
  803. i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
  804. fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
  805. loop = CHIP_IS_E2(bp) ?
  806. HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
  807. /* host sb data */
  808. #ifdef BCM_CNIC
  809. if (IS_FCOE_FP(fp))
  810. continue;
  811. #endif
  812. BNX2X_ERR(" run indexes (");
  813. for (j = 0; j < HC_SB_MAX_SM; j++)
  814. pr_cont("0x%x%s",
  815. fp->sb_running_index[j],
  816. (j == HC_SB_MAX_SM - 1) ? ")" : " ");
  817. BNX2X_ERR(" indexes (");
  818. for (j = 0; j < loop; j++)
  819. pr_cont("0x%x%s",
  820. fp->sb_index_values[j],
  821. (j == loop - 1) ? ")" : " ");
  822. /* fw sb data */
  823. data_size = CHIP_IS_E2(bp) ?
  824. sizeof(struct hc_status_block_data_e2) :
  825. sizeof(struct hc_status_block_data_e1x);
  826. data_size /= sizeof(u32);
  827. sb_data_p = CHIP_IS_E2(bp) ?
  828. (u32 *)&sb_data_e2 :
  829. (u32 *)&sb_data_e1x;
  830. /* copy sb data in here */
  831. for (j = 0; j < data_size; j++)
  832. *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
  833. CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
  834. j * sizeof(u32));
  835. if (CHIP_IS_E2(bp)) {
  836. pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
  837. "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
  838. sb_data_e2.common.p_func.pf_id,
  839. sb_data_e2.common.p_func.vf_id,
  840. sb_data_e2.common.p_func.vf_valid,
  841. sb_data_e2.common.p_func.vnic_id,
  842. sb_data_e2.common.same_igu_sb_1b);
  843. } else {
  844. pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
  845. "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
  846. sb_data_e1x.common.p_func.pf_id,
  847. sb_data_e1x.common.p_func.vf_id,
  848. sb_data_e1x.common.p_func.vf_valid,
  849. sb_data_e1x.common.p_func.vnic_id,
  850. sb_data_e1x.common.same_igu_sb_1b);
  851. }
  852. /* SB_SMs data */
  853. for (j = 0; j < HC_SB_MAX_SM; j++) {
  854. pr_cont("SM[%d] __flags (0x%x) "
  855. "igu_sb_id (0x%x) igu_seg_id(0x%x) "
  856. "time_to_expire (0x%x) "
  857. "timer_value(0x%x)\n", j,
  858. hc_sm_p[j].__flags,
  859. hc_sm_p[j].igu_sb_id,
  860. hc_sm_p[j].igu_seg_id,
  861. hc_sm_p[j].time_to_expire,
  862. hc_sm_p[j].timer_value);
  863. }
864. /* Indices data */
  865. for (j = 0; j < loop; j++) {
  866. pr_cont("INDEX[%d] flags (0x%x) "
  867. "timeout (0x%x)\n", j,
  868. hc_index_p[j].flags,
  869. hc_index_p[j].timeout);
  870. }
  871. }
  872. #ifdef BNX2X_STOP_ON_ERROR
  873. /* Rings */
  874. /* Rx */
  875. for_each_rx_queue(bp, i) {
  876. struct bnx2x_fastpath *fp = &bp->fp[i];
  877. start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
  878. end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
  879. for (j = start; j != end; j = RX_BD(j + 1)) {
  880. u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
  881. struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
  882. BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
  883. i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
  884. }
  885. start = RX_SGE(fp->rx_sge_prod);
  886. end = RX_SGE(fp->last_max_sge);
  887. for (j = start; j != end; j = RX_SGE(j + 1)) {
  888. u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
  889. struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
  890. BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
  891. i, j, rx_sge[1], rx_sge[0], sw_page->page);
  892. }
  893. start = RCQ_BD(fp->rx_comp_cons - 10);
  894. end = RCQ_BD(fp->rx_comp_cons + 503);
  895. for (j = start; j != end; j = RCQ_BD(j + 1)) {
  896. u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
  897. BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
  898. i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
  899. }
  900. }
  901. /* Tx */
  902. for_each_tx_queue(bp, i) {
  903. struct bnx2x_fastpath *fp = &bp->fp[i];
  904. start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
  905. end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
  906. for (j = start; j != end; j = TX_BD(j + 1)) {
  907. struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
  908. BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
  909. i, j, sw_bd->skb, sw_bd->first_bd);
  910. }
  911. start = TX_BD(fp->tx_bd_cons - 10);
  912. end = TX_BD(fp->tx_bd_cons + 254);
  913. for (j = start; j != end; j = TX_BD(j + 1)) {
  914. u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
  915. BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
  916. i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
  917. }
  918. }
  919. #endif
  920. bnx2x_fw_dump(bp);
  921. bnx2x_mc_assert(bp);
  922. BNX2X_ERR("end crash dump -----------------\n");
  923. }
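/* enable interrupts in the HC block according to the active mode
 * (MSI-X, MSI or INTx) and program the leading/trailing edge registers
 */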
  924. static void bnx2x_hc_int_enable(struct bnx2x *bp)
  925. {
  926. int port = BP_PORT(bp);
  927. u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  928. u32 val = REG_RD(bp, addr);
  929. int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
  930. int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
  931. if (msix) {
  932. val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  933. HC_CONFIG_0_REG_INT_LINE_EN_0);
  934. val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  935. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  936. } else if (msi) {
  937. val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
  938. val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  939. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  940. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  941. } else {
  942. val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  943. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  944. HC_CONFIG_0_REG_INT_LINE_EN_0 |
  945. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  946. if (!CHIP_IS_E1(bp)) {
  947. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
  948. val, port, addr);
  949. REG_WR(bp, addr, val);
  950. val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
  951. }
  952. }
  953. if (CHIP_IS_E1(bp))
  954. REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
  955. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
  956. val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
  957. REG_WR(bp, addr, val);
  958. /*
  959. * Ensure that HC_CONFIG is written before leading/trailing edge config
  960. */
  961. mmiowb();
  962. barrier();
  963. if (!CHIP_IS_E1(bp)) {
  964. /* init leading/trailing edge */
  965. if (IS_MF(bp)) {
  966. val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
  967. if (bp->port.pmf)
  968. /* enable nig and gpio3 attention */
  969. val |= 0x1100;
  970. } else
  971. val = 0xffff;
  972. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
  973. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
  974. }
  975. /* Make sure that interrupts are indeed enabled from here on */
  976. mmiowb();
  977. }
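/* enable interrupts in the IGU block; used when the chip's interrupt block
 * is not the HC (see bnx2x_int_enable)
 */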
  978. static void bnx2x_igu_int_enable(struct bnx2x *bp)
  979. {
  980. u32 val;
  981. int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
  982. int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
  983. val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
  984. if (msix) {
  985. val &= ~(IGU_PF_CONF_INT_LINE_EN |
  986. IGU_PF_CONF_SINGLE_ISR_EN);
  987. val |= (IGU_PF_CONF_FUNC_EN |
  988. IGU_PF_CONF_MSI_MSIX_EN |
  989. IGU_PF_CONF_ATTN_BIT_EN);
  990. } else if (msi) {
  991. val &= ~IGU_PF_CONF_INT_LINE_EN;
  992. val |= (IGU_PF_CONF_FUNC_EN |
  993. IGU_PF_CONF_MSI_MSIX_EN |
  994. IGU_PF_CONF_ATTN_BIT_EN |
  995. IGU_PF_CONF_SINGLE_ISR_EN);
  996. } else {
  997. val &= ~IGU_PF_CONF_MSI_MSIX_EN;
  998. val |= (IGU_PF_CONF_FUNC_EN |
  999. IGU_PF_CONF_INT_LINE_EN |
  1000. IGU_PF_CONF_ATTN_BIT_EN |
  1001. IGU_PF_CONF_SINGLE_ISR_EN);
  1002. }
  1003. DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
  1004. val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
  1005. REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
  1006. barrier();
  1007. /* init leading/trailing edge */
  1008. if (IS_MF(bp)) {
  1009. val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
  1010. if (bp->port.pmf)
  1011. /* enable nig and gpio3 attention */
  1012. val |= 0x1100;
  1013. } else
  1014. val = 0xffff;
  1015. REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
  1016. REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
  1017. /* Make sure that interrupts are indeed enabled from here on */
  1018. mmiowb();
  1019. }
  1020. void bnx2x_int_enable(struct bnx2x *bp)
  1021. {
  1022. if (bp->common.int_block == INT_BLOCK_HC)
  1023. bnx2x_hc_int_enable(bp);
  1024. else
  1025. bnx2x_igu_int_enable(bp);
  1026. }
  1027. static void bnx2x_hc_int_disable(struct bnx2x *bp)
  1028. {
  1029. int port = BP_PORT(bp);
  1030. u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  1031. u32 val = REG_RD(bp, addr);
  1032. /*
  1033. * in E1 we must use only PCI configuration space to disable
1034. * MSI/MSIX capability
1035. * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block
  1036. */
  1037. if (CHIP_IS_E1(bp)) {
1038. /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
1039. * use the mask register to prevent the HC from sending interrupts
1040. * after we exit this function
  1041. */
  1042. REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
  1043. val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  1044. HC_CONFIG_0_REG_INT_LINE_EN_0 |
  1045. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  1046. } else
  1047. val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  1048. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  1049. HC_CONFIG_0_REG_INT_LINE_EN_0 |
  1050. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  1051. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
  1052. val, port, addr);
  1053. /* flush all outstanding writes */
  1054. mmiowb();
  1055. REG_WR(bp, addr, val);
  1056. if (REG_RD(bp, addr) != val)
1057. BNX2X_ERR("BUG! proper val not read from HC!\n");
  1058. }
  1059. static void bnx2x_igu_int_disable(struct bnx2x *bp)
  1060. {
  1061. u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
  1062. val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
  1063. IGU_PF_CONF_INT_LINE_EN |
  1064. IGU_PF_CONF_ATTN_BIT_EN);
  1065. DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
  1066. /* flush all outstanding writes */
  1067. mmiowb();
  1068. REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
  1069. if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
  1070. BNX2X_ERR("BUG! proper val not read from IGU!\n");
  1071. }
  1072. static void bnx2x_int_disable(struct bnx2x *bp)
  1073. {
  1074. if (bp->common.int_block == INT_BLOCK_HC)
  1075. bnx2x_hc_int_disable(bp);
  1076. else
  1077. bnx2x_igu_int_disable(bp);
  1078. }
  1079. void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
  1080. {
  1081. int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
  1082. int i, offset;
  1083. /* disable interrupt handling */
  1084. atomic_inc(&bp->intr_sem);
  1085. smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
  1086. if (disable_hw)
  1087. /* prevent the HW from sending interrupts */
  1088. bnx2x_int_disable(bp);
  1089. /* make sure all ISRs are done */
  1090. if (msix) {
  1091. synchronize_irq(bp->msix_table[0].vector);
  1092. offset = 1;
  1093. #ifdef BCM_CNIC
  1094. offset++;
  1095. #endif
  1096. for_each_eth_queue(bp, i)
  1097. synchronize_irq(bp->msix_table[i + offset].vector);
  1098. } else
  1099. synchronize_irq(bp->pdev->irq);
  1100. /* make sure sp_task is not running */
  1101. cancel_delayed_work(&bp->sp_task);
  1102. flush_workqueue(bnx2x_wq);
  1103. }
  1104. /* fast path */
  1105. /*
  1106. * General service functions
  1107. */
  1108. /* Return true if succeeded to acquire the lock */
  1109. static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
  1110. {
  1111. u32 lock_status;
  1112. u32 resource_bit = (1 << resource);
  1113. int func = BP_FUNC(bp);
  1114. u32 hw_lock_control_reg;
  1115. DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
  1116. /* Validating that the resource is within range */
  1117. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  1118. DP(NETIF_MSG_HW,
  1119. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  1120. resource, HW_LOCK_MAX_RESOURCE_VALUE);
  1121. return false;
  1122. }
  1123. if (func <= 5)
  1124. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  1125. else
  1126. hw_lock_control_reg =
  1127. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  1128. /* Try to acquire the lock */
  1129. REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
  1130. lock_status = REG_RD(bp, hw_lock_control_reg);
  1131. if (lock_status & resource_bit)
  1132. return true;
  1133. DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
  1134. return false;
  1135. }
  1136. #ifdef BCM_CNIC
  1137. static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
  1138. #endif
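/* handle a slow-path (ramrod) completion reported on a fastpath CQE:
 * update fp->state according to the ramrod type and credit cq_spq_left
 */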
  1139. void bnx2x_sp_event(struct bnx2x_fastpath *fp,
  1140. union eth_rx_cqe *rr_cqe)
  1141. {
  1142. struct bnx2x *bp = fp->bp;
  1143. int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
  1144. int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
  1145. DP(BNX2X_MSG_SP,
  1146. "fp %d cid %d got ramrod #%d state is %x type is %d\n",
  1147. fp->index, cid, command, bp->state,
  1148. rr_cqe->ramrod_cqe.ramrod_type);
  1149. switch (command | fp->state) {
  1150. case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
  1151. DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
  1152. fp->state = BNX2X_FP_STATE_OPEN;
  1153. break;
  1154. case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
  1155. DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
  1156. fp->state = BNX2X_FP_STATE_HALTED;
  1157. break;
  1158. case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1159. DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
  1160. fp->state = BNX2X_FP_STATE_TERMINATED;
  1161. break;
  1162. default:
  1163. BNX2X_ERR("unexpected MC reply (%d) "
  1164. "fp[%d] state is %x\n",
  1165. command, fp->index, fp->state);
  1166. break;
  1167. }
  1168. smp_mb__before_atomic_inc();
  1169. atomic_inc(&bp->cq_spq_left);
  1170. /* push the change in fp->state and towards the memory */
  1171. smp_wmb();
  1172. return;
  1173. }
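/* INTx/MSI interrupt handler: ack the interrupt, schedule NAPI for every
 * fastpath queue whose status bit is set, and kick CNIC and the slow-path
 * task when needed
 */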
  1174. irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
  1175. {
  1176. struct bnx2x *bp = netdev_priv(dev_instance);
  1177. u16 status = bnx2x_ack_int(bp);
  1178. u16 mask;
  1179. int i;
  1180. /* Return here if interrupt is shared and it's not for us */
  1181. if (unlikely(status == 0)) {
  1182. DP(NETIF_MSG_INTR, "not our interrupt!\n");
  1183. return IRQ_NONE;
  1184. }
  1185. DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
  1186. /* Return here if interrupt is disabled */
  1187. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  1188. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  1189. return IRQ_HANDLED;
  1190. }
  1191. #ifdef BNX2X_STOP_ON_ERROR
  1192. if (unlikely(bp->panic))
  1193. return IRQ_HANDLED;
  1194. #endif
  1195. for_each_eth_queue(bp, i) {
  1196. struct bnx2x_fastpath *fp = &bp->fp[i];
  1197. mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
  1198. if (status & mask) {
  1199. /* Handle Rx and Tx according to SB id */
  1200. prefetch(fp->rx_cons_sb);
  1201. prefetch(fp->tx_cons_sb);
  1202. prefetch(&fp->sb_running_index[SM_RX_ID]);
  1203. napi_schedule(&bnx2x_fp(bp, fp->index, napi));
  1204. status &= ~mask;
  1205. }
  1206. }
  1207. #ifdef BCM_CNIC
  1208. mask = 0x2;
  1209. if (status & (mask | 0x1)) {
  1210. struct cnic_ops *c_ops = NULL;
  1211. rcu_read_lock();
  1212. c_ops = rcu_dereference(bp->cnic_ops);
  1213. if (c_ops)
  1214. c_ops->cnic_handler(bp->cnic_data, NULL);
  1215. rcu_read_unlock();
  1216. status &= ~mask;
  1217. }
  1218. #endif
  1219. if (unlikely(status & 0x1)) {
  1220. queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
  1221. status &= ~0x1;
  1222. if (!status)
  1223. return IRQ_HANDLED;
  1224. }
  1225. if (unlikely(status))
  1226. DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
  1227. status);
  1228. return IRQ_HANDLED;
  1229. }
  1230. /* end of fast path */
  1231. /* Link */
  1232. /*
  1233. * General service functions
  1234. */
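/* acquire a hardware resource lock, polling every 5ms for up to 5 seconds */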
  1235. int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
  1236. {
  1237. u32 lock_status;
  1238. u32 resource_bit = (1 << resource);
  1239. int func = BP_FUNC(bp);
  1240. u32 hw_lock_control_reg;
  1241. int cnt;
  1242. /* Validating that the resource is within range */
  1243. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  1244. DP(NETIF_MSG_HW,
  1245. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  1246. resource, HW_LOCK_MAX_RESOURCE_VALUE);
  1247. return -EINVAL;
  1248. }
  1249. if (func <= 5) {
  1250. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  1251. } else {
  1252. hw_lock_control_reg =
  1253. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  1254. }
  1255. /* Validating that the resource is not already taken */
  1256. lock_status = REG_RD(bp, hw_lock_control_reg);
  1257. if (lock_status & resource_bit) {
  1258. DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
  1259. lock_status, resource_bit);
  1260. return -EEXIST;
  1261. }
1262. /* Try for 5 seconds, every 5ms */
  1263. for (cnt = 0; cnt < 1000; cnt++) {
  1264. /* Try to acquire the lock */
  1265. REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
  1266. lock_status = REG_RD(bp, hw_lock_control_reg);
  1267. if (lock_status & resource_bit)
  1268. return 0;
  1269. msleep(5);
  1270. }
  1271. DP(NETIF_MSG_HW, "Timeout\n");
  1272. return -EAGAIN;
  1273. }
  1274. int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
  1275. {
  1276. u32 lock_status;
  1277. u32 resource_bit = (1 << resource);
  1278. int func = BP_FUNC(bp);
  1279. u32 hw_lock_control_reg;
  1280. DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
  1281. /* Validating that the resource is within range */
  1282. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  1283. DP(NETIF_MSG_HW,
  1284. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  1285. resource, HW_LOCK_MAX_RESOURCE_VALUE);
  1286. return -EINVAL;
  1287. }
  1288. if (func <= 5) {
  1289. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  1290. } else {
  1291. hw_lock_control_reg =
  1292. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  1293. }
  1294. /* Validating that the resource is currently taken */
  1295. lock_status = REG_RD(bp, hw_lock_control_reg);
  1296. if (!(lock_status & resource_bit)) {
  1297. DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
  1298. lock_status, resource_bit);
  1299. return -EFAULT;
  1300. }
  1301. REG_WR(bp, hw_lock_control_reg, resource_bit);
  1302. return 0;
  1303. }
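/* read the current value of a GPIO pin, taking the port-swap strap into
 * account
 */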
  1304. int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
  1305. {
  1306. /* The GPIO should be swapped if swap register is set and active */
  1307. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1308. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1309. int gpio_shift = gpio_num +
  1310. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1311. u32 gpio_mask = (1 << gpio_shift);
  1312. u32 gpio_reg;
  1313. int value;
  1314. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1315. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1316. return -EINVAL;
  1317. }
  1318. /* read GPIO value */
  1319. gpio_reg = REG_RD(bp, MISC_REG_GPIO);
  1320. /* get the requested pin value */
  1321. if ((gpio_reg & gpio_mask) == gpio_mask)
  1322. value = 1;
  1323. else
  1324. value = 0;
  1325. DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
  1326. return value;
  1327. }
  1328. int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
  1329. {
  1330. /* The GPIO should be swapped if swap register is set and active */
  1331. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1332. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1333. int gpio_shift = gpio_num +
  1334. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1335. u32 gpio_mask = (1 << gpio_shift);
  1336. u32 gpio_reg;
  1337. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1338. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1339. return -EINVAL;
  1340. }
  1341. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1342. /* read GPIO and mask except the float bits */
  1343. gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
  1344. switch (mode) {
  1345. case MISC_REGISTERS_GPIO_OUTPUT_LOW:
  1346. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
  1347. gpio_num, gpio_shift);
  1348. /* clear FLOAT and set CLR */
  1349. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1350. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
  1351. break;
  1352. case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
  1353. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
  1354. gpio_num, gpio_shift);
  1355. /* clear FLOAT and set SET */
  1356. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1357. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
  1358. break;
  1359. case MISC_REGISTERS_GPIO_INPUT_HI_Z:
  1360. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
  1361. gpio_num, gpio_shift);
  1362. /* set FLOAT */
  1363. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1364. break;
  1365. default:
  1366. break;
  1367. }
  1368. REG_WR(bp, MISC_REG_GPIO, gpio_reg);
  1369. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1370. return 0;
  1371. }
  1372. int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
  1373. {
  1374. /* The GPIO should be swapped if swap register is set and active */
  1375. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1376. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1377. int gpio_shift = gpio_num +
  1378. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1379. u32 gpio_mask = (1 << gpio_shift);
  1380. u32 gpio_reg;
  1381. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1382. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1383. return -EINVAL;
  1384. }
  1385. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1386. /* read GPIO int */
  1387. gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
  1388. switch (mode) {
  1389. case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
  1390. DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
  1391. "output low\n", gpio_num, gpio_shift);
  1392. /* clear SET and set CLR */
  1393. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
  1394. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
  1395. break;
  1396. case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
  1397. DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
  1398. "output high\n", gpio_num, gpio_shift);
  1399. /* clear CLR and set SET */
  1400. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
  1401. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
  1402. break;
  1403. default:
  1404. break;
  1405. }
  1406. REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
  1407. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1408. return 0;
  1409. }
  1410. static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
  1411. {
  1412. u32 spio_mask = (1 << spio_num);
  1413. u32 spio_reg;
  1414. if ((spio_num < MISC_REGISTERS_SPIO_4) ||
  1415. (spio_num > MISC_REGISTERS_SPIO_7)) {
  1416. BNX2X_ERR("Invalid SPIO %d\n", spio_num);
  1417. return -EINVAL;
  1418. }
  1419. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
  1420. /* read SPIO and mask except the float bits */
  1421. spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
  1422. switch (mode) {
  1423. case MISC_REGISTERS_SPIO_OUTPUT_LOW:
  1424. DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
  1425. /* clear FLOAT and set CLR */
  1426. spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1427. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
  1428. break;
  1429. case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
  1430. DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
  1431. /* clear FLOAT and set SET */
  1432. spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1433. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
  1434. break;
  1435. case MISC_REGISTERS_SPIO_INPUT_HI_Z:
  1436. DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
  1437. /* set FLOAT */
  1438. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1439. break;
  1440. default:
  1441. break;
  1442. }
  1443. REG_WR(bp, MISC_REG_SPIO, spio_reg);
  1444. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
  1445. return 0;
  1446. }
  1447. int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
  1448. {
  1449. u32 sel_phy_idx = 0;
  1450. if (bp->link_vars.link_up) {
  1451. sel_phy_idx = EXT_PHY1;
  1452. /* In case link is SERDES, check if the EXT_PHY2 is the one */
  1453. if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
  1454. (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
  1455. sel_phy_idx = EXT_PHY2;
  1456. } else {
  1457. switch (bnx2x_phy_selection(&bp->link_params)) {
  1458. case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
  1459. case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
  1460. case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
  1461. sel_phy_idx = EXT_PHY1;
  1462. break;
  1463. case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
  1464. case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
  1465. sel_phy_idx = EXT_PHY2;
  1466. break;
  1467. }
  1468. }
  1469. /*
1470. * The selected active PHY is always the one after swapping (in case PHY
  1471. * swapping is enabled). So when swapping is enabled, we need to reverse
  1472. * the configuration
  1473. */
  1474. if (bp->link_params.multi_phy_config &
  1475. PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
  1476. if (sel_phy_idx == EXT_PHY1)
  1477. sel_phy_idx = EXT_PHY2;
  1478. else if (sel_phy_idx == EXT_PHY2)
  1479. sel_phy_idx = EXT_PHY1;
  1480. }
  1481. return LINK_CONFIG_IDX(sel_phy_idx);
  1482. }
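/* translate the negotiated ieee_fc pause bits into the advertising flags of
 * the active link configuration
 */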
  1483. void bnx2x_calc_fc_adv(struct bnx2x *bp)
  1484. {
  1485. u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
  1486. switch (bp->link_vars.ieee_fc &
  1487. MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
  1488. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
  1489. bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
  1490. ADVERTISED_Pause);
  1491. break;
  1492. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
  1493. bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
  1494. ADVERTISED_Pause);
  1495. break;
  1496. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
  1497. bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
  1498. break;
  1499. default:
  1500. bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
  1501. ADVERTISED_Pause);
  1502. break;
  1503. }
  1504. }
  1505. u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
  1506. {
  1507. if (!BP_NOMCP(bp)) {
  1508. u8 rc;
  1509. int cfx_idx = bnx2x_get_link_cfg_idx(bp);
  1510. u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
  1511. /* Initialize link parameters structure variables */
  1512. /* It is recommended to turn off RX FC for jumbo frames
  1513. for better performance */
  1514. if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
  1515. bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
  1516. else
  1517. bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
  1518. bnx2x_acquire_phy_lock(bp);
  1519. if (load_mode == LOAD_DIAG) {
  1520. bp->link_params.loopback_mode = LOOPBACK_XGXS;
  1521. bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
  1522. }
  1523. rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
  1524. bnx2x_release_phy_lock(bp);
  1525. bnx2x_calc_fc_adv(bp);
  1526. if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
  1527. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  1528. bnx2x_link_report(bp);
  1529. }
  1530. bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
  1531. return rc;
  1532. }
  1533. BNX2X_ERR("Bootcode is missing - can not initialize link\n");
  1534. return -EINVAL;
  1535. }
  1536. void bnx2x_link_set(struct bnx2x *bp)
  1537. {
  1538. if (!BP_NOMCP(bp)) {
  1539. bnx2x_acquire_phy_lock(bp);
  1540. bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
  1541. bnx2x_phy_init(&bp->link_params, &bp->link_vars);
  1542. bnx2x_release_phy_lock(bp);
  1543. bnx2x_calc_fc_adv(bp);
  1544. } else
  1545. BNX2X_ERR("Bootcode is missing - can not set link\n");
  1546. }
  1547. static void bnx2x__link_reset(struct bnx2x *bp)
  1548. {
  1549. if (!BP_NOMCP(bp)) {
  1550. bnx2x_acquire_phy_lock(bp);
  1551. bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
  1552. bnx2x_release_phy_lock(bp);
  1553. } else
  1554. BNX2X_ERR("Bootcode is missing - can not reset link\n");
  1555. }
  1556. u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
  1557. {
  1558. u8 rc = 0;
  1559. if (!BP_NOMCP(bp)) {
  1560. bnx2x_acquire_phy_lock(bp);
  1561. rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
  1562. is_serdes);
  1563. bnx2x_release_phy_lock(bp);
  1564. } else
  1565. BNX2X_ERR("Bootcode is missing - can not test link\n");
  1566. return rc;
  1567. }
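/* initialize the per-port rate-shaping and fairness timer parameters from
 * the current line speed
 */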
  1568. static void bnx2x_init_port_minmax(struct bnx2x *bp)
  1569. {
  1570. u32 r_param = bp->link_vars.line_speed / 8;
  1571. u32 fair_periodic_timeout_usec;
  1572. u32 t_fair;
  1573. memset(&(bp->cmng.rs_vars), 0,
  1574. sizeof(struct rate_shaping_vars_per_port));
  1575. memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
  1576. /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
  1577. bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
  1578. /* this is the threshold below which no timer arming will occur
1579. the 1.25 coefficient makes the threshold a little bigger
1580. than the real time, to compensate for timer inaccuracy */
  1581. bp->cmng.rs_vars.rs_threshold =
  1582. (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
  1583. /* resolution of fairness timer */
  1584. fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
  1585. /* for 10G it is 1000usec. for 1G it is 10000usec. */
  1586. t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
  1587. /* this is the threshold below which we won't arm the timer anymore */
  1588. bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
  1589. /* we multiply by 1e3/8 to get bytes/msec.
1590. We don't want the credits to exceed a credit
1591. of t_fair*FAIR_MEM (the algorithm resolution) */
  1592. bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
  1593. /* since each tick is 4 usec */
  1594. bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
  1595. }
  1596. /* Calculates the sum of vn_min_rates.
  1597. It's needed for further normalizing of the min_rates.
  1598. Returns:
  1599. sum of vn_min_rates.
  1600. or
  1601. 0 - if all the min_rates are 0.
1602. In the latter case the fairness algorithm should be deactivated.
1603. If not all min_rates are zero, then those that are zero will be set to 1.
  1604. */
  1605. static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
  1606. {
  1607. int all_zero = 1;
  1608. int vn;
  1609. bp->vn_weight_sum = 0;
  1610. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  1611. u32 vn_cfg = bp->mf_config[vn];
  1612. u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
  1613. FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
  1614. /* Skip hidden vns */
  1615. if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
  1616. continue;
  1617. /* If min rate is zero - set it to 1 */
  1618. if (!vn_min_rate)
  1619. vn_min_rate = DEF_MIN_RATE;
  1620. else
  1621. all_zero = 0;
  1622. bp->vn_weight_sum += vn_min_rate;
  1623. }
  1624. /* ... only if all min rates are zeros - disable fairness */
  1625. if (all_zero) {
  1626. bp->cmng.flags.cmng_enables &=
  1627. ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
  1628. DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
  1629. " fairness will be disabled\n");
  1630. } else
  1631. bp->cmng.flags.cmng_enables |=
  1632. CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
  1633. }
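/* compute the per-VN rate-shaping and fairness parameters and store them in
 * XSTORM internal memory
 */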
  1634. static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
  1635. {
  1636. struct rate_shaping_vars_per_vn m_rs_vn;
  1637. struct fairness_vars_per_vn m_fair_vn;
  1638. u32 vn_cfg = bp->mf_config[vn];
  1639. int func = 2*vn + BP_PORT(bp);
  1640. u16 vn_min_rate, vn_max_rate;
  1641. int i;
  1642. /* If function is hidden - set min and max to zeroes */
  1643. if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
  1644. vn_min_rate = 0;
  1645. vn_max_rate = 0;
  1646. } else {
  1647. u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
  1648. vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
  1649. FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
  1650. /* If fairness is enabled (not all min rates are zeroes) and
  1651. if current min rate is zero - set it to 1.
  1652. This is a requirement of the algorithm. */
  1653. if (bp->vn_weight_sum && (vn_min_rate == 0))
  1654. vn_min_rate = DEF_MIN_RATE;
  1655. if (IS_MF_SI(bp))
  1656. /* maxCfg in percents of linkspeed */
  1657. vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
  1658. else
  1659. /* maxCfg is absolute in 100Mb units */
  1660. vn_max_rate = maxCfg * 100;
  1661. }
  1662. DP(NETIF_MSG_IFUP,
  1663. "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
  1664. func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
  1665. memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
  1666. memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
  1667. /* global vn counter - maximal Mbps for this vn */
  1668. m_rs_vn.vn_counter.rate = vn_max_rate;
  1669. /* quota - number of bytes transmitted in this period */
  1670. m_rs_vn.vn_counter.quota =
  1671. (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
  1672. if (bp->vn_weight_sum) {
  1673. /* credit for each period of the fairness algorithm:
  1674. number of bytes in T_FAIR (the vn share the port rate).
  1675. vn_weight_sum should not be larger than 10000, thus
  1676. T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
  1677. than zero */
  1678. m_fair_vn.vn_credit_delta =
  1679. max_t(u32, (vn_min_rate * (T_FAIR_COEF /
  1680. (8 * bp->vn_weight_sum))),
  1681. (bp->cmng.fair_vars.fair_threshold +
  1682. MIN_ABOVE_THRESH));
  1683. DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
  1684. m_fair_vn.vn_credit_delta);
  1685. }
  1686. /* Store it to internal memory */
  1687. for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
  1688. REG_WR(bp, BAR_XSTRORM_INTMEM +
  1689. XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
  1690. ((u32 *)(&m_rs_vn))[i]);
  1691. for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
  1692. REG_WR(bp, BAR_XSTRORM_INTMEM +
  1693. XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
  1694. ((u32 *)(&m_fair_vn))[i]);
  1695. }
  1696. static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
  1697. {
  1698. if (CHIP_REV_IS_SLOW(bp))
  1699. return CMNG_FNS_NONE;
  1700. if (IS_MF(bp))
  1701. return CMNG_FNS_MINMAX;
  1702. return CMNG_FNS_NONE;
  1703. }
  1704. static void bnx2x_read_mf_cfg(struct bnx2x *bp)
  1705. {
  1706. int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
  1707. if (BP_NOMCP(bp))
1708. return; /* what should be the default value in this case? */
  1709. /* For 2 port configuration the absolute function number formula
  1710. * is:
  1711. * abs_func = 2 * vn + BP_PORT + BP_PATH
  1712. *
  1713. * and there are 4 functions per port
  1714. *
  1715. * For 4 port configuration it is
  1716. * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
  1717. *
  1718. * and there are 2 functions per port
  1719. */
  1720. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  1721. int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
  1722. if (func >= E1H_FUNC_MAX)
  1723. break;
  1724. bp->mf_config[vn] =
  1725. MF_CFG_RD(bp, func_mf_config[func].config);
  1726. }
  1727. }
  1728. static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
  1729. {
  1730. if (cmng_type == CMNG_FNS_MINMAX) {
  1731. int vn;
  1732. /* clear cmng_enables */
  1733. bp->cmng.flags.cmng_enables = 0;
  1734. /* read mf conf from shmem */
  1735. if (read_cfg)
  1736. bnx2x_read_mf_cfg(bp);
  1737. /* Init rate shaping and fairness contexts */
  1738. bnx2x_init_port_minmax(bp);
  1739. /* vn_weight_sum and enable fairness if not 0 */
  1740. bnx2x_calc_vn_weight_sum(bp);
  1741. /* calculate and set min-max rate for each vn */
  1742. if (bp->port.pmf)
  1743. for (vn = VN_0; vn < E1HVN_MAX; vn++)
  1744. bnx2x_init_vn_minmax(bp, vn);
  1745. /* always enable rate shaping and fairness */
  1746. bp->cmng.flags.cmng_enables |=
  1747. CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
  1748. if (!bp->vn_weight_sum)
  1749. DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
  1750. " fairness will be disabled\n");
  1751. return;
  1752. }
  1753. /* rate shaping and fairness are disabled */
  1754. DP(NETIF_MSG_IFUP,
  1755. "rate shaping and fairness are disabled\n");
  1756. }
  1757. static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
  1758. {
  1759. int port = BP_PORT(bp);
  1760. int func;
  1761. int vn;
  1762. /* Set the attention towards other drivers on the same port */
  1763. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  1764. if (vn == BP_E1HVN(bp))
  1765. continue;
  1766. func = ((vn << 1) | port);
  1767. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
  1768. (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
  1769. }
  1770. }
  1771. /* This function is called upon link interrupt */
  1772. static void bnx2x_link_attn(struct bnx2x *bp)
  1773. {
  1774. u32 prev_link_status = bp->link_vars.link_status;
  1775. /* Make sure that we are synced with the current statistics */
  1776. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  1777. bnx2x_link_update(&bp->link_params, &bp->link_vars);
  1778. if (bp->link_vars.link_up) {
  1779. /* dropless flow control */
  1780. if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
  1781. int port = BP_PORT(bp);
  1782. u32 pause_enabled = 0;
  1783. if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
  1784. pause_enabled = 1;
  1785. REG_WR(bp, BAR_USTRORM_INTMEM +
  1786. USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
  1787. pause_enabled);
  1788. }
  1789. if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
  1790. struct host_port_stats *pstats;
  1791. pstats = bnx2x_sp(bp, port_stats);
  1792. /* reset old bmac stats */
  1793. memset(&(pstats->mac_stx[0]), 0,
  1794. sizeof(struct mac_stx));
  1795. }
  1796. if (bp->state == BNX2X_STATE_OPEN)
  1797. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  1798. }
  1799. if (bp->link_vars.link_up && bp->link_vars.line_speed) {
  1800. int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
  1801. if (cmng_fns != CMNG_FNS_NONE) {
  1802. bnx2x_cmng_fns_init(bp, false, cmng_fns);
  1803. storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
  1804. } else
  1805. /* rate shaping and fairness are disabled */
  1806. DP(NETIF_MSG_IFUP,
  1807. "single function mode without fairness\n");
  1808. }
  1809. if (IS_MF(bp))
  1810. bnx2x_link_sync_notify(bp);
  1811. /* indicate link status only if link status actually changed */
  1812. if (prev_link_status != bp->link_vars.link_status)
  1813. bnx2x_link_report(bp);
  1814. }
  1815. void bnx2x__link_status_update(struct bnx2x *bp)
  1816. {
  1817. if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
  1818. return;
  1819. bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
  1820. if (bp->link_vars.link_up)
  1821. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  1822. else
  1823. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  1824. /* the link status update could be the result of a DCC event
  1825. hence re-read the shmem mf configuration */
  1826. bnx2x_read_mf_cfg(bp);
  1827. /* indicate link status */
  1828. bnx2x_link_report(bp);
  1829. }
  1830. static void bnx2x_pmf_update(struct bnx2x *bp)
  1831. {
  1832. int port = BP_PORT(bp);
  1833. u32 val;
  1834. bp->port.pmf = 1;
  1835. DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
  1836. /* enable nig attention */
  1837. val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
  1838. if (bp->common.int_block == INT_BLOCK_HC) {
  1839. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
  1840. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
  1841. } else if (CHIP_IS_E2(bp)) {
  1842. REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
  1843. REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
  1844. }
  1845. bnx2x_stats_handle(bp, STATS_EVENT_PMF);
  1846. }
  1847. /* end of Link */
  1848. /* slow path */
  1849. /*
  1850. * General service functions
  1851. */
  1852. /* send the MCP a request, block until there is a reply */
  1853. u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
  1854. {
  1855. int mb_idx = BP_FW_MB_IDX(bp);
  1856. u32 seq = ++bp->fw_seq;
  1857. u32 rc = 0;
  1858. u32 cnt = 1;
  1859. u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
  1860. mutex_lock(&bp->fw_mb_mutex);
  1861. SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
  1862. SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
  1863. DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
  1864. do {
1865. /* let the FW do its magic ... */
  1866. msleep(delay);
  1867. rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
  1868. /* Give the FW up to 5 second (500*10ms) */
  1869. } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
  1870. DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
  1871. cnt*delay, rc, seq);
  1872. /* is this a reply to our command? */
  1873. if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
  1874. rc &= FW_MSG_CODE_MASK;
  1875. else {
  1876. /* FW BUG! */
  1877. BNX2X_ERR("FW failed to respond!\n");
  1878. bnx2x_fw_dump(bp);
  1879. rc = 0;
  1880. }
  1881. mutex_unlock(&bp->fw_mb_mutex);
  1882. return rc;
  1883. }
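/*
 * bnx2x_fw_command() above is a write-then-poll handshake: tag the request
 * with a sequence number, then poll the firmware mailbox until the reply
 * echoes that sequence or the retry budget expires.  The block below is a
 * minimal standalone sketch of that idiom (toy mailbox, hypothetical names,
 * no 10 ms sleep between polls); it is fenced off from the build.
 */
#if 0	/* illustrative only - not part of the driver */
#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK	0x0000ffffu
#define CODE_MASK	0xffff0000u

/* Toy mailbox standing in for the SHMEM function mailbox. */
static uint32_t mbox_reply;
static uint32_t pending_header;
static unsigned int polls_until_reply;

static void mbox_write_req(uint32_t header, uint32_t param)
{
	(void)param;
	pending_header = header;
	polls_until_reply = 3;			/* "FW" answers on 3rd poll */
}

static uint32_t mbox_read_reply(void)
{
	if (polls_until_reply && --polls_until_reply == 0)
		mbox_reply = 0x10000u | (pending_header & SEQ_MASK);
	return mbox_reply;
}

/* Write a command tagged with a sequence number, then poll until the reply
 * echoes the same sequence or the retry budget runs out. */
static uint32_t fw_command_sketch(uint32_t command, uint32_t *seq_counter)
{
	uint32_t seq = ++(*seq_counter) & SEQ_MASK;
	uint32_t reply;
	unsigned int tries = 0;

	mbox_write_req(command | seq, 0);
	do {
		reply = mbox_read_reply();
	} while (((reply & SEQ_MASK) != seq) && (++tries < 500));

	return ((reply & SEQ_MASK) == seq) ? (reply & CODE_MASK) : 0;
}

int main(void)
{
	uint32_t seq = 0;

	printf("response code 0x%x\n", fw_command_sketch(0x00010000u, &seq));
	return 0;
}
#endif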
  1884. static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
  1885. {
  1886. #ifdef BCM_CNIC
  1887. if (IS_FCOE_FP(fp) && IS_MF(bp))
  1888. return false;
  1889. #endif
  1890. return true;
  1891. }
  1892. /* must be called under rtnl_lock */
  1893. static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
  1894. {
  1895. u32 mask = (1 << cl_id);
1896. /* initial setting is BNX2X_ACCEPT_NONE */
  1897. u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
  1898. u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
  1899. u8 unmatched_unicast = 0;
  1900. if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
  1901. unmatched_unicast = 1;
  1902. if (filters & BNX2X_PROMISCUOUS_MODE) {
1903. /* promiscuous - accept all, drop none */
  1904. drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
  1905. accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
  1906. if (IS_MF_SI(bp)) {
  1907. /*
1908. * In SI mode, promiscuous mode accepts
1909. * only unmatched packets
  1910. */
  1911. unmatched_unicast = 1;
  1912. accp_all_ucast = 0;
  1913. }
  1914. }
  1915. if (filters & BNX2X_ACCEPT_UNICAST) {
  1916. /* accept matched ucast */
  1917. drop_all_ucast = 0;
  1918. }
  1919. if (filters & BNX2X_ACCEPT_MULTICAST)
  1920. /* accept matched mcast */
  1921. drop_all_mcast = 0;
  1922. if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
1923. /* accept all ucast */
  1924. drop_all_ucast = 0;
  1925. accp_all_ucast = 1;
  1926. }
  1927. if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
  1928. /* accept all mcast */
  1929. drop_all_mcast = 0;
  1930. accp_all_mcast = 1;
  1931. }
  1932. if (filters & BNX2X_ACCEPT_BROADCAST) {
  1933. /* accept (all) bcast */
  1934. drop_all_bcast = 0;
  1935. accp_all_bcast = 1;
  1936. }
  1937. bp->mac_filters.ucast_drop_all = drop_all_ucast ?
  1938. bp->mac_filters.ucast_drop_all | mask :
  1939. bp->mac_filters.ucast_drop_all & ~mask;
  1940. bp->mac_filters.mcast_drop_all = drop_all_mcast ?
  1941. bp->mac_filters.mcast_drop_all | mask :
  1942. bp->mac_filters.mcast_drop_all & ~mask;
  1943. bp->mac_filters.bcast_drop_all = drop_all_bcast ?
  1944. bp->mac_filters.bcast_drop_all | mask :
  1945. bp->mac_filters.bcast_drop_all & ~mask;
  1946. bp->mac_filters.ucast_accept_all = accp_all_ucast ?
  1947. bp->mac_filters.ucast_accept_all | mask :
  1948. bp->mac_filters.ucast_accept_all & ~mask;
  1949. bp->mac_filters.mcast_accept_all = accp_all_mcast ?
  1950. bp->mac_filters.mcast_accept_all | mask :
  1951. bp->mac_filters.mcast_accept_all & ~mask;
  1952. bp->mac_filters.bcast_accept_all = accp_all_bcast ?
  1953. bp->mac_filters.bcast_accept_all | mask :
  1954. bp->mac_filters.bcast_accept_all & ~mask;
  1955. bp->mac_filters.unmatched_unicast = unmatched_unicast ?
  1956. bp->mac_filters.unmatched_unicast | mask :
  1957. bp->mac_filters.unmatched_unicast & ~mask;
  1958. }
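/*
 * bnx2x_rxq_set_mac_filters() above repeats one idiom per filter mask:
 * conditionally set or clear the bit for client cl_id.  A minimal
 * standalone sketch of that set-or-clear pattern follows (hypothetical
 * names, not driver code); it is fenced off from the build.
 */
#if 0	/* illustrative only - not part of the driver */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Conditionally set or clear the per-client bit in a filter mask - the
 * idiom used above for the drop-all/accept-all ucast/mcast/bcast masks. */
static inline uint32_t set_client_bit(uint32_t mask, uint16_t cl_id, bool on)
{
	uint32_t bit = 1u << cl_id;

	return on ? (mask | bit) : (mask & ~bit);
}

int main(void)
{
	uint32_t ucast_drop_all = 0xffffffffu;

	/* client 3 accepts unicast, so it leaves the drop-all mask */
	ucast_drop_all = set_client_bit(ucast_drop_all, 3, false);
	printf("ucast_drop_all=0x%08x\n", ucast_drop_all);	/* 0xfffffff7 */
	return 0;
}
#endif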
  1959. static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
  1960. {
  1961. struct tstorm_eth_function_common_config tcfg = {0};
  1962. u16 rss_flgs;
  1963. /* tpa */
  1964. if (p->func_flgs & FUNC_FLG_TPA)
  1965. tcfg.config_flags |=
  1966. TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
  1967. /* set rss flags */
  1968. rss_flgs = (p->rss->mode <<
  1969. TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
  1970. if (p->rss->cap & RSS_IPV4_CAP)
  1971. rss_flgs |= RSS_IPV4_CAP_MASK;
  1972. if (p->rss->cap & RSS_IPV4_TCP_CAP)
  1973. rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
  1974. if (p->rss->cap & RSS_IPV6_CAP)
  1975. rss_flgs |= RSS_IPV6_CAP_MASK;
  1976. if (p->rss->cap & RSS_IPV6_TCP_CAP)
  1977. rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
  1978. tcfg.config_flags |= rss_flgs;
  1979. tcfg.rss_result_mask = p->rss->result_mask;
  1980. storm_memset_func_cfg(bp, &tcfg, p->func_id);
  1981. /* Enable the function in the FW */
  1982. storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
  1983. storm_memset_func_en(bp, p->func_id, 1);
  1984. /* statistics */
  1985. if (p->func_flgs & FUNC_FLG_STATS) {
  1986. struct stats_indication_flags stats_flags = {0};
  1987. stats_flags.collect_eth = 1;
  1988. storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
  1989. storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
  1990. storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
  1991. storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
  1992. storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
  1993. storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
  1994. storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
  1995. storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
  1996. }
  1997. /* spq */
  1998. if (p->func_flgs & FUNC_FLG_SPQ) {
  1999. storm_memset_spq_addr(bp, p->spq_map, p->func_id);
  2000. REG_WR(bp, XSEM_REG_FAST_MEMORY +
  2001. XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
  2002. }
  2003. }
  2004. static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
  2005. struct bnx2x_fastpath *fp)
  2006. {
  2007. u16 flags = 0;
  2008. /* calculate queue flags */
  2009. flags |= QUEUE_FLG_CACHE_ALIGN;
  2010. flags |= QUEUE_FLG_HC;
  2011. flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
  2012. flags |= QUEUE_FLG_VLAN;
  2013. DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
  2014. if (!fp->disable_tpa)
  2015. flags |= QUEUE_FLG_TPA;
  2016. flags = stat_counter_valid(bp, fp) ?
  2017. (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
  2018. return flags;
  2019. }
  2020. static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
  2021. struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
  2022. struct bnx2x_rxq_init_params *rxq_init)
  2023. {
  2024. u16 max_sge = 0;
  2025. u16 sge_sz = 0;
  2026. u16 tpa_agg_size = 0;
  2027. /* calculate queue flags */
  2028. u16 flags = bnx2x_get_cl_flags(bp, fp);
  2029. if (!fp->disable_tpa) {
  2030. pause->sge_th_hi = 250;
  2031. pause->sge_th_lo = 150;
  2032. tpa_agg_size = min_t(u32,
  2033. (min_t(u32, 8, MAX_SKB_FRAGS) *
  2034. SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
  2035. max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
  2036. SGE_PAGE_SHIFT;
  2037. max_sge = ((max_sge + PAGES_PER_SGE - 1) &
  2038. (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
  2039. sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
  2040. 0xffff);
  2041. }
  2042. /* pause - not for e1 */
  2043. if (!CHIP_IS_E1(bp)) {
  2044. pause->bd_th_hi = 350;
  2045. pause->bd_th_lo = 250;
  2046. pause->rcq_th_hi = 350;
  2047. pause->rcq_th_lo = 250;
  2048. pause->sge_th_hi = 0;
  2049. pause->sge_th_lo = 0;
  2050. pause->pri_map = 1;
  2051. }
  2052. /* rxq setup */
  2053. rxq_init->flags = flags;
  2054. rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
  2055. rxq_init->dscr_map = fp->rx_desc_mapping;
  2056. rxq_init->sge_map = fp->rx_sge_mapping;
  2057. rxq_init->rcq_map = fp->rx_comp_mapping;
  2058. rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
  2059. /* Always use mini-jumbo MTU for FCoE L2 ring */
  2060. if (IS_FCOE_FP(fp))
  2061. rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
  2062. else
  2063. rxq_init->mtu = bp->dev->mtu;
  2064. rxq_init->buf_sz = fp->rx_buf_size;
  2065. rxq_init->cl_qzone_id = fp->cl_qzone_id;
  2066. rxq_init->cl_id = fp->cl_id;
  2067. rxq_init->spcl_id = fp->cl_id;
  2068. rxq_init->stat_id = fp->cl_id;
  2069. rxq_init->tpa_agg_sz = tpa_agg_size;
  2070. rxq_init->sge_buf_sz = sge_sz;
  2071. rxq_init->max_sges_pkt = max_sge;
  2072. rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
  2073. rxq_init->fw_sb_id = fp->fw_sb_id;
  2074. if (IS_FCOE_FP(fp))
  2075. rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
  2076. else
  2077. rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
  2078. rxq_init->cid = HW_CID(bp, fp->cid);
  2079. rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
  2080. }
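/*
 * The max_sge computation above rounds the MTU up to whole SGE pages, then
 * rounds that page count up to a multiple of PAGES_PER_SGE before turning
 * it into an SGE count.  Below is a standalone sketch with assumed page
 * constants (not the driver's definitions), fenced off from the build.
 */
#if 0	/* illustrative only - assumed page geometry */
#include <stdint.h>
#include <stdio.h>

#define SGE_PAGE_SIZE		4096u
#define SGE_PAGE_SHIFT		12
#define PAGES_PER_SGE_SHIFT	0
#define PAGES_PER_SGE		(1u << PAGES_PER_SGE_SHIFT)

/* Round mtu up to whole SGE pages, round the page count up to a multiple
 * of PAGES_PER_SGE, and convert that into a number of SGEs. */
static uint16_t max_sges_for_mtu(uint32_t mtu)
{
	uint32_t pages = (mtu + SGE_PAGE_SIZE - 1) >> SGE_PAGE_SHIFT;

	pages = (pages + PAGES_PER_SGE - 1) & ~(PAGES_PER_SGE - 1);
	return (uint16_t)(pages >> PAGES_PER_SGE_SHIFT);
}

int main(void)
{
	/* e.g. a 9000-byte MTU needs 3 pages -> 3 SGEs with these values */
	printf("%u\n", max_sges_for_mtu(9000));
	return 0;
}
#endif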
  2081. static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
  2082. struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
  2083. {
  2084. u16 flags = bnx2x_get_cl_flags(bp, fp);
  2085. txq_init->flags = flags;
  2086. txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
  2087. txq_init->dscr_map = fp->tx_desc_mapping;
  2088. txq_init->stat_id = fp->cl_id;
  2089. txq_init->cid = HW_CID(bp, fp->cid);
  2090. txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
  2091. txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
  2092. txq_init->fw_sb_id = fp->fw_sb_id;
  2093. if (IS_FCOE_FP(fp)) {
  2094. txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
  2095. txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
  2096. }
  2097. txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
  2098. }
  2099. static void bnx2x_pf_init(struct bnx2x *bp)
  2100. {
  2101. struct bnx2x_func_init_params func_init = {0};
  2102. struct bnx2x_rss_params rss = {0};
  2103. struct event_ring_data eq_data = { {0} };
  2104. u16 flags;
  2105. /* pf specific setups */
  2106. if (!CHIP_IS_E1(bp))
  2107. storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
  2108. if (CHIP_IS_E2(bp)) {
  2109. /* reset IGU PF statistics: MSIX + ATTN */
  2110. /* PF */
  2111. REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
  2112. BNX2X_IGU_STAS_MSG_VF_CNT*4 +
  2113. (CHIP_MODE_IS_4_PORT(bp) ?
  2114. BP_FUNC(bp) : BP_VN(bp))*4, 0);
  2115. /* ATTN */
  2116. REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
  2117. BNX2X_IGU_STAS_MSG_VF_CNT*4 +
  2118. BNX2X_IGU_STAS_MSG_PF_CNT*4 +
  2119. (CHIP_MODE_IS_4_PORT(bp) ?
  2120. BP_FUNC(bp) : BP_VN(bp))*4, 0);
  2121. }
  2122. /* function setup flags */
  2123. flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
  2124. if (CHIP_IS_E1x(bp))
  2125. flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
  2126. else
  2127. flags |= FUNC_FLG_TPA;
  2128. /* function setup */
2129. /*
  2130. * Although RSS is meaningless when there is a single HW queue we
  2131. * still need it enabled in order to have HW Rx hash generated.
  2132. */
  2133. rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
  2134. RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
  2135. rss.mode = bp->multi_mode;
  2136. rss.result_mask = MULTI_MASK;
  2137. func_init.rss = &rss;
  2138. func_init.func_flgs = flags;
  2139. func_init.pf_id = BP_FUNC(bp);
  2140. func_init.func_id = BP_FUNC(bp);
  2141. func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
  2142. func_init.spq_map = bp->spq_mapping;
  2143. func_init.spq_prod = bp->spq_prod_idx;
  2144. bnx2x_func_init(bp, &func_init);
  2145. memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
  2146. /*
2147. Congestion management values depend on the link rate.
2148. There is no active link, so the initial link rate is set to 10 Gbps.
2149. When the link comes up, the congestion management values are
2150. re-calculated according to the actual link rate.
  2151. */
  2152. bp->link_vars.line_speed = SPEED_10000;
  2153. bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
  2154. /* Only the PMF sets the HW */
  2155. if (bp->port.pmf)
  2156. storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
  2157. /* no rx until link is up */
  2158. bp->rx_mode = BNX2X_RX_MODE_NONE;
  2159. bnx2x_set_storm_rx_mode(bp);
  2160. /* init Event Queue */
  2161. eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
  2162. eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
  2163. eq_data.producer = bp->eq_prod;
  2164. eq_data.index_id = HC_SP_INDEX_EQ_CONS;
  2165. eq_data.sb_id = DEF_SB_ID;
  2166. storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
  2167. }
  2168. static void bnx2x_e1h_disable(struct bnx2x *bp)
  2169. {
  2170. int port = BP_PORT(bp);
  2171. netif_tx_disable(bp->dev);
  2172. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
  2173. netif_carrier_off(bp->dev);
  2174. }
  2175. static void bnx2x_e1h_enable(struct bnx2x *bp)
  2176. {
  2177. int port = BP_PORT(bp);
  2178. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2179. /* Tx queues should only be re-enabled */
  2180. netif_tx_wake_all_queues(bp->dev);
  2181. /*
  2182. * Should not call netif_carrier_on since it will be called if the link
  2183. * is up when checking for link state
  2184. */
  2185. }
  2186. /* called due to MCP event (on pmf):
  2187. * reread new bandwidth configuration
  2188. * configure FW
2189. * notify other functions about the change
  2190. */
  2191. static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
  2192. {
  2193. if (bp->link_vars.link_up) {
  2194. bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
  2195. bnx2x_link_sync_notify(bp);
  2196. }
  2197. storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
  2198. }
  2199. static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
  2200. {
  2201. bnx2x_config_mf_bw(bp);
  2202. bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
  2203. }
  2204. static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
  2205. {
  2206. DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
  2207. if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
  2208. /*
  2209. * This is the only place besides the function initialization
  2210. * where the bp->flags can change so it is done without any
  2211. * locks
  2212. */
  2213. if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
  2214. DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
  2215. bp->flags |= MF_FUNC_DIS;
  2216. bnx2x_e1h_disable(bp);
  2217. } else {
  2218. DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
  2219. bp->flags &= ~MF_FUNC_DIS;
  2220. bnx2x_e1h_enable(bp);
  2221. }
  2222. dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
  2223. }
  2224. if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
  2225. bnx2x_config_mf_bw(bp);
  2226. dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
  2227. }
  2228. /* Report results to MCP */
  2229. if (dcc_event)
  2230. bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
  2231. else
  2232. bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
  2233. }
  2234. /* must be called under the spq lock */
  2235. static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
  2236. {
  2237. struct eth_spe *next_spe = bp->spq_prod_bd;
  2238. if (bp->spq_prod_bd == bp->spq_last_bd) {
  2239. bp->spq_prod_bd = bp->spq;
  2240. bp->spq_prod_idx = 0;
  2241. DP(NETIF_MSG_TIMER, "end of spq\n");
  2242. } else {
  2243. bp->spq_prod_bd++;
  2244. bp->spq_prod_idx++;
  2245. }
  2246. return next_spe;
  2247. }
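/*
 * bnx2x_sp_get_next() above hands out the current producer BD and advances
 * the producer, wrapping both the BD pointer and the index at the end of
 * the ring.  A minimal standalone sketch of that wrap-around idiom follows
 * (hypothetical ring type, not the SPQ structures); fenced off from the
 * build.
 */
#if 0	/* illustrative only - not the driver's SPQ */
#include <stdint.h>
#include <stdio.h>

struct ring {
	int *base;		/* first element    */
	int *last;		/* last element     */
	int *prod_elem;		/* next to hand out */
	uint16_t prod_idx;	/* producer index   */
};

/* Return the current producer element and advance, wrapping both the
 * element pointer and the index at the end of the ring. */
static int *ring_get_next(struct ring *r)
{
	int *elem = r->prod_elem;

	if (r->prod_elem == r->last) {
		r->prod_elem = r->base;
		r->prod_idx = 0;
	} else {
		r->prod_elem++;
		r->prod_idx++;
	}
	return elem;
}

int main(void)
{
	int slots[4];
	struct ring r = { slots, &slots[3], slots, 0 };
	int i;

	for (i = 0; i < 6; i++)		/* walks slots 0,1,2,3 then wraps */
		printf("slot %ld idx %u\n",
		       (long)(ring_get_next(&r) - slots),
		       (unsigned int)r.prod_idx);
	return 0;
}
#endif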
  2248. /* must be called under the spq lock */
  2249. static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
  2250. {
  2251. int func = BP_FUNC(bp);
  2252. /* Make sure that BD data is updated before writing the producer */
  2253. wmb();
  2254. REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
  2255. bp->spq_prod_idx);
  2256. mmiowb();
  2257. }
  2258. /* the slow path queue is odd since completions arrive on the fastpath ring */
  2259. int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
  2260. u32 data_hi, u32 data_lo, int common)
  2261. {
  2262. struct eth_spe *spe;
  2263. u16 type;
  2264. #ifdef BNX2X_STOP_ON_ERROR
  2265. if (unlikely(bp->panic))
  2266. return -EIO;
  2267. #endif
  2268. spin_lock_bh(&bp->spq_lock);
  2269. if (common) {
  2270. if (!atomic_read(&bp->eq_spq_left)) {
  2271. BNX2X_ERR("BUG! EQ ring full!\n");
  2272. spin_unlock_bh(&bp->spq_lock);
  2273. bnx2x_panic();
  2274. return -EBUSY;
  2275. }
  2276. } else if (!atomic_read(&bp->cq_spq_left)) {
  2277. BNX2X_ERR("BUG! SPQ ring full!\n");
  2278. spin_unlock_bh(&bp->spq_lock);
  2279. bnx2x_panic();
  2280. return -EBUSY;
  2281. }
  2282. spe = bnx2x_sp_get_next(bp);
2283. /* CID needs the port number to be encoded in it */
  2284. spe->hdr.conn_and_cmd_data =
  2285. cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
  2286. HW_CID(bp, cid));
  2287. if (common)
  2288. /* Common ramrods:
  2289. * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
  2290. * TRAFFIC_STOP, TRAFFIC_START
  2291. */
  2292. type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
  2293. & SPE_HDR_CONN_TYPE;
  2294. else
  2295. /* ETH ramrods: SETUP, HALT */
  2296. type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
  2297. & SPE_HDR_CONN_TYPE;
  2298. type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
  2299. SPE_HDR_FUNCTION_ID);
  2300. spe->hdr.type = cpu_to_le16(type);
  2301. spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
  2302. spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2303. /* stats ramrod has its own slot on the spq */
  2304. if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
  2305. /* It's ok if the actual decrement is issued towards the memory
  2306. * somewhere between the spin_lock and spin_unlock. Thus no
2307. * more explicit memory barrier is needed.
  2308. */
  2309. if (common)
  2310. atomic_dec(&bp->eq_spq_left);
  2311. else
  2312. atomic_dec(&bp->cq_spq_left);
  2313. }
  2314. DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
  2315. "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
  2316. "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
  2317. bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
  2318. (u32)(U64_LO(bp->spq_mapping) +
  2319. (void *)bp->spq_prod_bd - (void *)bp->spq), command,
  2320. HW_CID(bp, cid), data_hi, data_lo, type,
  2321. atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
  2322. bnx2x_sp_prod_update(bp);
  2323. spin_unlock_bh(&bp->spq_lock);
  2324. return 0;
  2325. }
  2326. /* acquire split MCP access lock register */
  2327. static int bnx2x_acquire_alr(struct bnx2x *bp)
  2328. {
  2329. u32 j, val;
  2330. int rc = 0;
  2331. might_sleep();
  2332. for (j = 0; j < 1000; j++) {
  2333. val = (1UL << 31);
  2334. REG_WR(bp, GRCBASE_MCP + 0x9c, val);
  2335. val = REG_RD(bp, GRCBASE_MCP + 0x9c);
  2336. if (val & (1L << 31))
  2337. break;
  2338. msleep(5);
  2339. }
  2340. if (!(val & (1L << 31))) {
  2341. BNX2X_ERR("Cannot acquire MCP access lock register\n");
  2342. rc = -EBUSY;
  2343. }
  2344. return rc;
  2345. }
  2346. /* release split MCP access lock register */
  2347. static void bnx2x_release_alr(struct bnx2x *bp)
  2348. {
  2349. REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
  2350. }
  2351. #define BNX2X_DEF_SB_ATT_IDX 0x0001
  2352. #define BNX2X_DEF_SB_IDX 0x0002
  2353. static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
  2354. {
  2355. struct host_sp_status_block *def_sb = bp->def_status_blk;
  2356. u16 rc = 0;
  2357. barrier(); /* status block is written to by the chip */
  2358. if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
  2359. bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
  2360. rc |= BNX2X_DEF_SB_ATT_IDX;
  2361. }
  2362. if (bp->def_idx != def_sb->sp_sb.running_index) {
  2363. bp->def_idx = def_sb->sp_sb.running_index;
  2364. rc |= BNX2X_DEF_SB_IDX;
  2365. }
2366. /* Do not reorder: reading the indices must complete before handling */
  2367. barrier();
  2368. return rc;
  2369. }
  2370. /*
  2371. * slow path service functions
  2372. */
  2373. static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
  2374. {
  2375. int port = BP_PORT(bp);
  2376. u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  2377. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  2378. u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
  2379. NIG_REG_MASK_INTERRUPT_PORT0;
  2380. u32 aeu_mask;
  2381. u32 nig_mask = 0;
  2382. u32 reg_addr;
  2383. if (bp->attn_state & asserted)
  2384. BNX2X_ERR("IGU ERROR\n");
  2385. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  2386. aeu_mask = REG_RD(bp, aeu_addr);
  2387. DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
  2388. aeu_mask, asserted);
  2389. aeu_mask &= ~(asserted & 0x3ff);
  2390. DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
  2391. REG_WR(bp, aeu_addr, aeu_mask);
  2392. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  2393. DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
  2394. bp->attn_state |= asserted;
  2395. DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
  2396. if (asserted & ATTN_HARD_WIRED_MASK) {
  2397. if (asserted & ATTN_NIG_FOR_FUNC) {
  2398. bnx2x_acquire_phy_lock(bp);
  2399. /* save nig interrupt mask */
  2400. nig_mask = REG_RD(bp, nig_int_mask_addr);
  2401. REG_WR(bp, nig_int_mask_addr, 0);
  2402. bnx2x_link_attn(bp);
  2403. /* handle unicore attn? */
  2404. }
  2405. if (asserted & ATTN_SW_TIMER_4_FUNC)
  2406. DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
  2407. if (asserted & GPIO_2_FUNC)
  2408. DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
  2409. if (asserted & GPIO_3_FUNC)
  2410. DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
  2411. if (asserted & GPIO_4_FUNC)
  2412. DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
  2413. if (port == 0) {
  2414. if (asserted & ATTN_GENERAL_ATTN_1) {
  2415. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
  2416. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
  2417. }
  2418. if (asserted & ATTN_GENERAL_ATTN_2) {
  2419. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
  2420. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
  2421. }
  2422. if (asserted & ATTN_GENERAL_ATTN_3) {
  2423. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
  2424. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
  2425. }
  2426. } else {
  2427. if (asserted & ATTN_GENERAL_ATTN_4) {
  2428. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
  2429. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
  2430. }
  2431. if (asserted & ATTN_GENERAL_ATTN_5) {
  2432. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
  2433. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
  2434. }
  2435. if (asserted & ATTN_GENERAL_ATTN_6) {
  2436. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
  2437. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
  2438. }
  2439. }
  2440. } /* if hardwired */
  2441. if (bp->common.int_block == INT_BLOCK_HC)
  2442. reg_addr = (HC_REG_COMMAND_REG + port*32 +
  2443. COMMAND_REG_ATTN_BITS_SET);
  2444. else
  2445. reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
  2446. DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
  2447. (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
  2448. REG_WR(bp, reg_addr, asserted);
  2449. /* now set back the mask */
  2450. if (asserted & ATTN_NIG_FOR_FUNC) {
  2451. REG_WR(bp, nig_int_mask_addr, nig_mask);
  2452. bnx2x_release_phy_lock(bp);
  2453. }
  2454. }
  2455. static inline void bnx2x_fan_failure(struct bnx2x *bp)
  2456. {
  2457. int port = BP_PORT(bp);
  2458. u32 ext_phy_config;
  2459. /* mark the failure */
  2460. ext_phy_config =
  2461. SHMEM_RD(bp,
  2462. dev_info.port_hw_config[port].external_phy_config);
  2463. ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
  2464. ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
  2465. SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
  2466. ext_phy_config);
  2467. /* log the failure */
  2468. netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
  2469. " the driver to shutdown the card to prevent permanent"
  2470. " damage. Please contact OEM Support for assistance\n");
  2471. }
  2472. static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
  2473. {
  2474. int port = BP_PORT(bp);
  2475. int reg_offset;
  2476. u32 val;
  2477. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  2478. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  2479. if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
  2480. val = REG_RD(bp, reg_offset);
  2481. val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
  2482. REG_WR(bp, reg_offset, val);
  2483. BNX2X_ERR("SPIO5 hw attention\n");
  2484. /* Fan failure attention */
  2485. bnx2x_hw_reset_phy(&bp->link_params);
  2486. bnx2x_fan_failure(bp);
  2487. }
  2488. if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
  2489. AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
  2490. bnx2x_acquire_phy_lock(bp);
  2491. bnx2x_handle_module_detect_int(&bp->link_params);
  2492. bnx2x_release_phy_lock(bp);
  2493. }
  2494. if (attn & HW_INTERRUT_ASSERT_SET_0) {
  2495. val = REG_RD(bp, reg_offset);
  2496. val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
  2497. REG_WR(bp, reg_offset, val);
  2498. BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
  2499. (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
  2500. bnx2x_panic();
  2501. }
  2502. }
  2503. static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
  2504. {
  2505. u32 val;
  2506. if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
  2507. val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
  2508. BNX2X_ERR("DB hw attention 0x%x\n", val);
  2509. /* DORQ discard attention */
  2510. if (val & 0x2)
  2511. BNX2X_ERR("FATAL error from DORQ\n");
  2512. }
  2513. if (attn & HW_INTERRUT_ASSERT_SET_1) {
  2514. int port = BP_PORT(bp);
  2515. int reg_offset;
  2516. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
  2517. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
  2518. val = REG_RD(bp, reg_offset);
  2519. val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
  2520. REG_WR(bp, reg_offset, val);
  2521. BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
  2522. (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
  2523. bnx2x_panic();
  2524. }
  2525. }
  2526. static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
  2527. {
  2528. u32 val;
  2529. if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
  2530. val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
  2531. BNX2X_ERR("CFC hw attention 0x%x\n", val);
  2532. /* CFC error attention */
  2533. if (val & 0x2)
  2534. BNX2X_ERR("FATAL error from CFC\n");
  2535. }
  2536. if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
  2537. val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
  2538. BNX2X_ERR("PXP hw attention 0x%x\n", val);
  2539. /* RQ_USDMDP_FIFO_OVERFLOW */
  2540. if (val & 0x18000)
  2541. BNX2X_ERR("FATAL error from PXP\n");
  2542. if (CHIP_IS_E2(bp)) {
  2543. val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
  2544. BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
  2545. }
  2546. }
  2547. if (attn & HW_INTERRUT_ASSERT_SET_2) {
  2548. int port = BP_PORT(bp);
  2549. int reg_offset;
  2550. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
  2551. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
  2552. val = REG_RD(bp, reg_offset);
  2553. val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
  2554. REG_WR(bp, reg_offset, val);
  2555. BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
  2556. (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
  2557. bnx2x_panic();
  2558. }
  2559. }
  2560. static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
  2561. {
  2562. u32 val;
  2563. if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
  2564. if (attn & BNX2X_PMF_LINK_ASSERT) {
  2565. int func = BP_FUNC(bp);
  2566. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
  2567. bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
  2568. func_mf_config[BP_ABS_FUNC(bp)].config);
  2569. val = SHMEM_RD(bp,
  2570. func_mb[BP_FW_MB_IDX(bp)].drv_status);
  2571. if (val & DRV_STATUS_DCC_EVENT_MASK)
  2572. bnx2x_dcc_event(bp,
  2573. (val & DRV_STATUS_DCC_EVENT_MASK));
  2574. if (val & DRV_STATUS_SET_MF_BW)
  2575. bnx2x_set_mf_bw(bp);
  2576. bnx2x__link_status_update(bp);
  2577. if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
  2578. bnx2x_pmf_update(bp);
  2579. if (bp->port.pmf &&
  2580. (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
  2581. bp->dcbx_enabled > 0)
  2582. /* start dcbx state machine */
  2583. bnx2x_dcbx_set_params(bp,
  2584. BNX2X_DCBX_STATE_NEG_RECEIVED);
  2585. } else if (attn & BNX2X_MC_ASSERT_BITS) {
  2586. BNX2X_ERR("MC assert!\n");
  2587. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
  2588. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
  2589. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
  2590. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
  2591. bnx2x_panic();
  2592. } else if (attn & BNX2X_MCP_ASSERT) {
  2593. BNX2X_ERR("MCP assert!\n");
  2594. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
  2595. bnx2x_fw_dump(bp);
  2596. } else
  2597. BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
  2598. }
  2599. if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
  2600. BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
  2601. if (attn & BNX2X_GRC_TIMEOUT) {
  2602. val = CHIP_IS_E1(bp) ? 0 :
  2603. REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
  2604. BNX2X_ERR("GRC time-out 0x%08x\n", val);
  2605. }
  2606. if (attn & BNX2X_GRC_RSV) {
  2607. val = CHIP_IS_E1(bp) ? 0 :
  2608. REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
  2609. BNX2X_ERR("GRC reserved 0x%08x\n", val);
  2610. }
  2611. REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
  2612. }
  2613. }
  2614. #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
  2615. #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
  2616. #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
  2617. #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
  2618. #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
  2619. /*
  2620. * should be run under rtnl lock
  2621. */
  2622. static inline void bnx2x_set_reset_done(struct bnx2x *bp)
  2623. {
  2624. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2625. val &= ~(1 << RESET_DONE_FLAG_SHIFT);
  2626. REG_WR(bp, BNX2X_MISC_GEN_REG, val);
  2627. barrier();
  2628. mmiowb();
  2629. }
  2630. /*
  2631. * should be run under rtnl lock
  2632. */
  2633. static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
  2634. {
  2635. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2636. val |= (1 << 16);
  2637. REG_WR(bp, BNX2X_MISC_GEN_REG, val);
  2638. barrier();
  2639. mmiowb();
  2640. }
  2641. /*
  2642. * should be run under rtnl lock
  2643. */
  2644. bool bnx2x_reset_is_done(struct bnx2x *bp)
  2645. {
  2646. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2647. DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
  2648. return (val & RESET_DONE_FLAG_MASK) ? false : true;
  2649. }
  2650. /*
  2651. * should be run under rtnl lock
  2652. */
  2653. inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
  2654. {
  2655. u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2656. DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
  2657. val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
  2658. REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
  2659. barrier();
  2660. mmiowb();
  2661. }
  2662. /*
  2663. * should be run under rtnl lock
  2664. */
  2665. u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
  2666. {
  2667. u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2668. DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
  2669. val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
  2670. REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
  2671. barrier();
  2672. mmiowb();
  2673. return val1;
  2674. }
  2675. /*
  2676. * should be run under rtnl lock
  2677. */
  2678. static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
  2679. {
  2680. return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
  2681. }
  2682. static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
  2683. {
  2684. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2685. REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
  2686. }
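/*
 * The helpers above pack two things into one generic register: the low
 * LOAD_COUNTER_BITS hold a load counter and the bits above it flag a reset
 * in progress.  A standalone model of that pack/unpack arithmetic follows
 * (same layout as the macros above, hypothetical names); fenced off from
 * the build.
 */
#if 0	/* illustrative only - standalone model of the register layout */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define CNT_BITS	16
#define CNT_MASK	((1u << CNT_BITS) - 1)	/* load counter, bits 15..0 */
#define RST_MASK	(~CNT_MASK)		/* reset flag lives above   */

static uint32_t inc_load_cnt(uint32_t reg)
{
	uint32_t cnt = ((reg & CNT_MASK) + 1) & CNT_MASK;

	return (reg & RST_MASK) | cnt;		/* keep flag bits untouched */
}

static bool reset_is_done(uint32_t reg)
{
	return (reg & RST_MASK) == 0;	/* any upper bit = reset in progress */
}

int main(void)
{
	uint32_t reg = 1u << 16;		/* reset in progress, count 0 */

	reg = inc_load_cnt(reg);
	printf("count=%u reset_done=%d\n", reg & CNT_MASK, reset_is_done(reg));
	return 0;
}
#endif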
  2687. static inline void _print_next_block(int idx, const char *blk)
  2688. {
  2689. if (idx)
  2690. pr_cont(", ");
  2691. pr_cont("%s", blk);
  2692. }
  2693. static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
  2694. {
  2695. int i = 0;
  2696. u32 cur_bit = 0;
  2697. for (i = 0; sig; i++) {
  2698. cur_bit = ((u32)0x1 << i);
  2699. if (sig & cur_bit) {
  2700. switch (cur_bit) {
  2701. case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
  2702. _print_next_block(par_num++, "BRB");
  2703. break;
  2704. case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
  2705. _print_next_block(par_num++, "PARSER");
  2706. break;
  2707. case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
  2708. _print_next_block(par_num++, "TSDM");
  2709. break;
  2710. case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
  2711. _print_next_block(par_num++, "SEARCHER");
  2712. break;
  2713. case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
  2714. _print_next_block(par_num++, "TSEMI");
  2715. break;
  2716. }
  2717. /* Clear the bit */
  2718. sig &= ~cur_bit;
  2719. }
  2720. }
  2721. return par_num;
  2722. }
  2723. static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
  2724. {
  2725. int i = 0;
  2726. u32 cur_bit = 0;
  2727. for (i = 0; sig; i++) {
  2728. cur_bit = ((u32)0x1 << i);
  2729. if (sig & cur_bit) {
  2730. switch (cur_bit) {
  2731. case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
  2732. _print_next_block(par_num++, "PBCLIENT");
  2733. break;
  2734. case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
  2735. _print_next_block(par_num++, "QM");
  2736. break;
  2737. case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
  2738. _print_next_block(par_num++, "XSDM");
  2739. break;
  2740. case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
  2741. _print_next_block(par_num++, "XSEMI");
  2742. break;
  2743. case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
  2744. _print_next_block(par_num++, "DOORBELLQ");
  2745. break;
  2746. case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
  2747. _print_next_block(par_num++, "VAUX PCI CORE");
  2748. break;
  2749. case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
  2750. _print_next_block(par_num++, "DEBUG");
  2751. break;
  2752. case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
  2753. _print_next_block(par_num++, "USDM");
  2754. break;
  2755. case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
  2756. _print_next_block(par_num++, "USEMI");
  2757. break;
  2758. case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
  2759. _print_next_block(par_num++, "UPB");
  2760. break;
  2761. case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
  2762. _print_next_block(par_num++, "CSDM");
  2763. break;
  2764. }
  2765. /* Clear the bit */
  2766. sig &= ~cur_bit;
  2767. }
  2768. }
  2769. return par_num;
  2770. }
  2771. static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
  2772. {
  2773. int i = 0;
  2774. u32 cur_bit = 0;
  2775. for (i = 0; sig; i++) {
  2776. cur_bit = ((u32)0x1 << i);
  2777. if (sig & cur_bit) {
  2778. switch (cur_bit) {
  2779. case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
  2780. _print_next_block(par_num++, "CSEMI");
  2781. break;
  2782. case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
  2783. _print_next_block(par_num++, "PXP");
  2784. break;
  2785. case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
  2786. _print_next_block(par_num++,
  2787. "PXPPCICLOCKCLIENT");
  2788. break;
  2789. case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
  2790. _print_next_block(par_num++, "CFC");
  2791. break;
  2792. case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
  2793. _print_next_block(par_num++, "CDU");
  2794. break;
  2795. case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
  2796. _print_next_block(par_num++, "IGU");
  2797. break;
  2798. case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
  2799. _print_next_block(par_num++, "MISC");
  2800. break;
  2801. }
  2802. /* Clear the bit */
  2803. sig &= ~cur_bit;
  2804. }
  2805. }
  2806. return par_num;
  2807. }
  2808. static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
  2809. {
  2810. int i = 0;
  2811. u32 cur_bit = 0;
  2812. for (i = 0; sig; i++) {
  2813. cur_bit = ((u32)0x1 << i);
  2814. if (sig & cur_bit) {
  2815. switch (cur_bit) {
  2816. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
  2817. _print_next_block(par_num++, "MCP ROM");
  2818. break;
  2819. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
  2820. _print_next_block(par_num++, "MCP UMP RX");
  2821. break;
  2822. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
  2823. _print_next_block(par_num++, "MCP UMP TX");
  2824. break;
  2825. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
  2826. _print_next_block(par_num++, "MCP SCPAD");
  2827. break;
  2828. }
  2829. /* Clear the bit */
  2830. sig &= ~cur_bit;
  2831. }
  2832. }
  2833. return par_num;
  2834. }
  2835. static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
  2836. u32 sig2, u32 sig3)
  2837. {
  2838. if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
  2839. (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
  2840. int par_num = 0;
  2841. DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
  2842. "[0]:0x%08x [1]:0x%08x "
  2843. "[2]:0x%08x [3]:0x%08x\n",
  2844. sig0 & HW_PRTY_ASSERT_SET_0,
  2845. sig1 & HW_PRTY_ASSERT_SET_1,
  2846. sig2 & HW_PRTY_ASSERT_SET_2,
  2847. sig3 & HW_PRTY_ASSERT_SET_3);
  2848. printk(KERN_ERR"%s: Parity errors detected in blocks: ",
  2849. bp->dev->name);
  2850. par_num = bnx2x_print_blocks_with_parity0(
  2851. sig0 & HW_PRTY_ASSERT_SET_0, par_num);
  2852. par_num = bnx2x_print_blocks_with_parity1(
  2853. sig1 & HW_PRTY_ASSERT_SET_1, par_num);
  2854. par_num = bnx2x_print_blocks_with_parity2(
  2855. sig2 & HW_PRTY_ASSERT_SET_2, par_num);
  2856. par_num = bnx2x_print_blocks_with_parity3(
  2857. sig3 & HW_PRTY_ASSERT_SET_3, par_num);
  2858. printk("\n");
  2859. return true;
  2860. } else
  2861. return false;
  2862. }
  2863. bool bnx2x_chk_parity_attn(struct bnx2x *bp)
  2864. {
  2865. struct attn_route attn;
  2866. int port = BP_PORT(bp);
  2867. attn.sig[0] = REG_RD(bp,
  2868. MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
  2869. port*4);
  2870. attn.sig[1] = REG_RD(bp,
  2871. MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
  2872. port*4);
  2873. attn.sig[2] = REG_RD(bp,
  2874. MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
  2875. port*4);
  2876. attn.sig[3] = REG_RD(bp,
  2877. MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
  2878. port*4);
  2879. return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
  2880. attn.sig[3]);
  2881. }
  2882. static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
  2883. {
  2884. u32 val;
  2885. if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
  2886. val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
  2887. BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
  2888. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
  2889. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2890. "ADDRESS_ERROR\n");
  2891. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
  2892. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2893. "INCORRECT_RCV_BEHAVIOR\n");
  2894. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
  2895. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2896. "WAS_ERROR_ATTN\n");
  2897. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
  2898. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2899. "VF_LENGTH_VIOLATION_ATTN\n");
  2900. if (val &
  2901. PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
  2902. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2903. "VF_GRC_SPACE_VIOLATION_ATTN\n");
  2904. if (val &
  2905. PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
  2906. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2907. "VF_MSIX_BAR_VIOLATION_ATTN\n");
  2908. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
  2909. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2910. "TCPL_ERROR_ATTN\n");
  2911. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
  2912. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2913. "TCPL_IN_TWO_RCBS_ATTN\n");
  2914. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
  2915. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2916. "CSSNOOP_FIFO_OVERFLOW\n");
  2917. }
  2918. if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
  2919. val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
  2920. BNX2X_ERR("ATC hw attention 0x%x\n", val);
  2921. if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
  2922. BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
  2923. if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
  2924. BNX2X_ERR("ATC_ATC_INT_STS_REG"
  2925. "_ATC_TCPL_TO_NOT_PEND\n");
  2926. if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
  2927. BNX2X_ERR("ATC_ATC_INT_STS_REG_"
  2928. "ATC_GPA_MULTIPLE_HITS\n");
  2929. if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
  2930. BNX2X_ERR("ATC_ATC_INT_STS_REG_"
  2931. "ATC_RCPL_TO_EMPTY_CNT\n");
  2932. if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
  2933. BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
  2934. if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
  2935. BNX2X_ERR("ATC_ATC_INT_STS_REG_"
  2936. "ATC_IREQ_LESS_THAN_STU\n");
  2937. }
  2938. if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
  2939. AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
  2940. BNX2X_ERR("FATAL parity attention set4 0x%x\n",
  2941. (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
  2942. AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
  2943. }
  2944. }
  2945. static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
  2946. {
  2947. struct attn_route attn, *group_mask;
  2948. int port = BP_PORT(bp);
  2949. int index;
  2950. u32 reg_addr;
  2951. u32 val;
  2952. u32 aeu_mask;
  2953. /* need to take HW lock because MCP or other port might also
  2954. try to handle this event */
  2955. bnx2x_acquire_alr(bp);
  2956. if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
  2957. bp->recovery_state = BNX2X_RECOVERY_INIT;
  2958. bnx2x_set_reset_in_progress(bp);
  2959. schedule_delayed_work(&bp->reset_task, 0);
  2960. /* Disable HW interrupts */
  2961. bnx2x_int_disable(bp);
  2962. bnx2x_release_alr(bp);
  2963. /* In case of parity errors don't handle attentions so that
2964. * other functions would "see" the parity errors.
  2965. */
  2966. return;
  2967. }
  2968. attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
  2969. attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
  2970. attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
  2971. attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
  2972. if (CHIP_IS_E2(bp))
  2973. attn.sig[4] =
  2974. REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
  2975. else
  2976. attn.sig[4] = 0;
  2977. DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
  2978. attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
  2979. for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
  2980. if (deasserted & (1 << index)) {
  2981. group_mask = &bp->attn_group[index];
  2982. DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
  2983. "%08x %08x %08x\n",
  2984. index,
  2985. group_mask->sig[0], group_mask->sig[1],
  2986. group_mask->sig[2], group_mask->sig[3],
  2987. group_mask->sig[4]);
  2988. bnx2x_attn_int_deasserted4(bp,
  2989. attn.sig[4] & group_mask->sig[4]);
  2990. bnx2x_attn_int_deasserted3(bp,
  2991. attn.sig[3] & group_mask->sig[3]);
  2992. bnx2x_attn_int_deasserted1(bp,
  2993. attn.sig[1] & group_mask->sig[1]);
  2994. bnx2x_attn_int_deasserted2(bp,
  2995. attn.sig[2] & group_mask->sig[2]);
  2996. bnx2x_attn_int_deasserted0(bp,
  2997. attn.sig[0] & group_mask->sig[0]);
  2998. }
  2999. }
  3000. bnx2x_release_alr(bp);
  3001. if (bp->common.int_block == INT_BLOCK_HC)
  3002. reg_addr = (HC_REG_COMMAND_REG + port*32 +
  3003. COMMAND_REG_ATTN_BITS_CLR);
  3004. else
  3005. reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
  3006. val = ~deasserted;
  3007. DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
  3008. (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
  3009. REG_WR(bp, reg_addr, val);
  3010. if (~bp->attn_state & deasserted)
  3011. BNX2X_ERR("IGU ERROR\n");
  3012. reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  3013. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  3014. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  3015. aeu_mask = REG_RD(bp, reg_addr);
  3016. DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
  3017. aeu_mask, deasserted);
  3018. aeu_mask |= (deasserted & 0x3ff);
  3019. DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
  3020. REG_WR(bp, reg_addr, aeu_mask);
  3021. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  3022. DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
  3023. bp->attn_state &= ~deasserted;
  3024. DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
  3025. }
  3026. static void bnx2x_attn_int(struct bnx2x *bp)
  3027. {
  3028. /* read local copy of bits */
  3029. u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
  3030. attn_bits);
  3031. u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
  3032. attn_bits_ack);
  3033. u32 attn_state = bp->attn_state;
  3034. /* look for changed bits */
  3035. u32 asserted = attn_bits & ~attn_ack & ~attn_state;
  3036. u32 deasserted = ~attn_bits & attn_ack & attn_state;
  3037. DP(NETIF_MSG_HW,
  3038. "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
  3039. attn_bits, attn_ack, asserted, deasserted);
  3040. if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
  3041. BNX2X_ERR("BAD attention state\n");
  3042. /* handle bits that were raised */
  3043. if (asserted)
  3044. bnx2x_attn_int_asserted(bp, asserted);
  3045. if (deasserted)
  3046. bnx2x_attn_int_deasserted(bp, deasserted);
  3047. }
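/*
 * bnx2x_attn_int() above derives the newly asserted and newly deasserted
 * attention bits from three words: the current bits, the acknowledged bits
 * and the driver's cached state.  The standalone sketch below spells out
 * the two derivations with example values (hypothetical, not taken from
 * hardware); fenced off from the build.
 */
#if 0	/* illustrative only - bit derivation used above */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t attn_bits  = 0x5;	/* currently raised by the chip   */
	uint32_t attn_ack   = 0x3;	/* already acknowledged           */
	uint32_t attn_state = 0x3;	/* what the driver believes is on */

	/* raised now, not yet acked, not yet tracked as asserted */
	uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
	/* no longer raised, but still acked and still tracked */
	uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;

	/* bit 2 is newly asserted, bit 1 is newly deasserted */
	printf("asserted=0x%x deasserted=0x%x\n", asserted, deasserted);
	/* with these values: asserted=0x4, deasserted=0x2 */
	return 0;
}
#endif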
  3048. static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
  3049. {
  3050. /* No memory barriers */
  3051. storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
  3052. mmiowb(); /* keep prod updates ordered */
  3053. }
  3054. #ifdef BCM_CNIC
  3055. static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
  3056. union event_ring_elem *elem)
  3057. {
  3058. if (!bp->cnic_eth_dev.starting_cid ||
  3059. cid < bp->cnic_eth_dev.starting_cid)
  3060. return 1;
  3061. DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
  3062. if (unlikely(elem->message.data.cfc_del_event.error)) {
  3063. BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
  3064. cid);
  3065. bnx2x_panic_dump(bp);
  3066. }
  3067. bnx2x_cnic_cfc_comp(bp, cid);
  3068. return 0;
  3069. }
  3070. #endif
  3071. static void bnx2x_eq_int(struct bnx2x *bp)
  3072. {
  3073. u16 hw_cons, sw_cons, sw_prod;
  3074. union event_ring_elem *elem;
  3075. u32 cid;
  3076. u8 opcode;
  3077. int spqe_cnt = 0;
  3078. hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3079. /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3080. * When we get the next-page we need to adjust so the loop
3081. * condition below will be met. The next element is the size of a
3082. * regular element and hence we increment by 1.
  3083. */
  3084. if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
  3085. hw_cons++;
3086. /* This function may never run in parallel with itself for a
3087. * specific bp, thus there is no need for a "paired" read memory
  3088. * barrier here.
  3089. */
  3090. sw_cons = bp->eq_cons;
  3091. sw_prod = bp->eq_prod;
  3092. DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
  3093. hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
  3094. for (; sw_cons != hw_cons;
  3095. sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
  3096. elem = &bp->eq_ring[EQ_DESC(sw_cons)];
  3097. cid = SW_CID(elem->message.data.cfc_del_event.cid);
  3098. opcode = elem->message.opcode;
  3099. /* handle eq element */
  3100. switch (opcode) {
  3101. case EVENT_RING_OPCODE_STAT_QUERY:
  3102. DP(NETIF_MSG_TIMER, "got statistics comp event\n");
  3103. /* nothing to do with stats comp */
  3104. continue;
  3105. case EVENT_RING_OPCODE_CFC_DEL:
  3106. /* handle according to cid range */
  3107. /*
  3108. * we may want to verify here that the bp state is
  3109. * HALTING
  3110. */
  3111. DP(NETIF_MSG_IFDOWN,
  3112. "got delete ramrod for MULTI[%d]\n", cid);
  3113. #ifdef BCM_CNIC
  3114. if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
  3115. goto next_spqe;
  3116. if (cid == BNX2X_FCOE_ETH_CID)
  3117. bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
  3118. else
  3119. #endif
  3120. bnx2x_fp(bp, cid, state) =
  3121. BNX2X_FP_STATE_CLOSED;
  3122. goto next_spqe;
  3123. case EVENT_RING_OPCODE_STOP_TRAFFIC:
  3124. DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
  3125. bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
  3126. goto next_spqe;
  3127. case EVENT_RING_OPCODE_START_TRAFFIC:
  3128. DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
  3129. bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
  3130. goto next_spqe;
  3131. }
  3132. switch (opcode | bp->state) {
  3133. case (EVENT_RING_OPCODE_FUNCTION_START |
  3134. BNX2X_STATE_OPENING_WAIT4_PORT):
  3135. DP(NETIF_MSG_IFUP, "got setup ramrod\n");
  3136. bp->state = BNX2X_STATE_FUNC_STARTED;
  3137. break;
  3138. case (EVENT_RING_OPCODE_FUNCTION_STOP |
  3139. BNX2X_STATE_CLOSING_WAIT4_HALT):
  3140. DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
  3141. bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
  3142. break;
  3143. case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
  3144. case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
  3145. DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
  3146. if (elem->message.data.set_mac_event.echo)
  3147. bp->set_mac_pending = 0;
  3148. break;
  3149. case (EVENT_RING_OPCODE_SET_MAC |
  3150. BNX2X_STATE_CLOSING_WAIT4_HALT):
  3151. DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
  3152. if (elem->message.data.set_mac_event.echo)
  3153. bp->set_mac_pending = 0;
  3154. break;
  3155. default:
3156. /* unknown event - log an error and continue */
  3157. BNX2X_ERR("Unknown EQ event %d\n",
  3158. elem->message.opcode);
  3159. }
  3160. next_spqe:
  3161. spqe_cnt++;
  3162. } /* for */
  3163. smp_mb__before_atomic_inc();
  3164. atomic_add(spqe_cnt, &bp->eq_spq_left);
  3165. bp->eq_cons = sw_cons;
  3166. bp->eq_prod = sw_prod;
3167. /* Make sure that the above memory writes were issued towards the memory */
  3168. smp_wmb();
  3169. /* update producer */
  3170. bnx2x_update_eq_prod(bp, bp->eq_prod);
  3171. }
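/*
 * The EQ consumer loop above bumps hw_cons past a next-page element so the
 * "sw_cons != hw_cons" condition can be met, and advances sw_cons with an
 * increment that skips those elements.  A standalone sketch of that paged
 * ring indexing follows, with an assumed page geometry (not the driver's
 * EQ constants); fenced off from the build.
 */
#if 0	/* illustrative only - assumed page geometry */
#include <stdint.h>
#include <stdio.h>

#define DESC_PER_PAGE	256u			/* assumed */
#define MAX_PAGE_IDX	(DESC_PER_PAGE - 1)	/* last slot = next-page ptr */

/* Advance an index, stepping over the next-page element of every page,
 * in the spirit of the NEXT_EQ_IDX() increments used above. */
static uint16_t next_idx(uint16_t idx)
{
	return ((idx & MAX_PAGE_IDX) == MAX_PAGE_IDX - 1) ? idx + 2 : idx + 1;
}

/* The FW consumer never points at a next-page slot; if it lands on one,
 * step over it so the "sw_cons != hw_cons" loop condition can terminate. */
static uint16_t fixup_hw_cons(uint16_t hw_cons)
{
	return ((hw_cons & MAX_PAGE_IDX) == MAX_PAGE_IDX) ? hw_cons + 1
							  : hw_cons;
}

int main(void)
{
	printf("%u %u\n", (unsigned int)next_idx(254),
	       (unsigned int)fixup_hw_cons(255));	/* prints: 256 256 */
	return 0;
}
#endif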
  3172. static void bnx2x_sp_task(struct work_struct *work)
  3173. {
  3174. struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
  3175. u16 status;
  3176. /* Return here if interrupt is disabled */
  3177. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  3178. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  3179. return;
  3180. }
  3181. status = bnx2x_update_dsb_idx(bp);
  3182. /* if (status == 0) */
  3183. /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
  3184. DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
  3185. /* HW attentions */
  3186. if (status & BNX2X_DEF_SB_ATT_IDX) {
  3187. bnx2x_attn_int(bp);
  3188. status &= ~BNX2X_DEF_SB_ATT_IDX;
  3189. }
  3190. /* SP events: STAT_QUERY and others */
  3191. if (status & BNX2X_DEF_SB_IDX) {
  3192. #ifdef BCM_CNIC
  3193. struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
  3194. if ((!NO_FCOE(bp)) &&
  3195. (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
  3196. napi_schedule(&bnx2x_fcoe(bp, napi));
  3197. #endif
  3198. /* Handle EQ completions */
  3199. bnx2x_eq_int(bp);
  3200. bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
  3201. le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
  3202. status &= ~BNX2X_DEF_SB_IDX;
  3203. }
  3204. if (unlikely(status))
  3205. DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
  3206. status);
  3207. bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
  3208. le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
  3209. }
  3210. irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
  3211. {
  3212. struct net_device *dev = dev_instance;
  3213. struct bnx2x *bp = netdev_priv(dev);
  3214. /* Return here if interrupt is disabled */
  3215. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  3216. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  3217. return IRQ_HANDLED;
  3218. }
  3219. bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
  3220. IGU_INT_DISABLE, 0);
  3221. #ifdef BNX2X_STOP_ON_ERROR
  3222. if (unlikely(bp->panic))
  3223. return IRQ_HANDLED;
  3224. #endif
  3225. #ifdef BCM_CNIC
  3226. {
  3227. struct cnic_ops *c_ops;
  3228. rcu_read_lock();
  3229. c_ops = rcu_dereference(bp->cnic_ops);
  3230. if (c_ops)
  3231. c_ops->cnic_handler(bp->cnic_data, NULL);
  3232. rcu_read_unlock();
  3233. }
  3234. #endif
  3235. queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
  3236. return IRQ_HANDLED;
  3237. }
  3238. /* end of slow path */
  3239. static void bnx2x_timer(unsigned long data)
  3240. {
  3241. struct bnx2x *bp = (struct bnx2x *) data;
  3242. if (!netif_running(bp->dev))
  3243. return;
  3244. if (atomic_read(&bp->intr_sem) != 0)
  3245. goto timer_restart;
  3246. if (poll) {
  3247. struct bnx2x_fastpath *fp = &bp->fp[0];
  3248. int rc;
  3249. bnx2x_tx_int(fp);
  3250. rc = bnx2x_rx_int(fp, 1000);
  3251. }
  3252. if (!BP_NOMCP(bp)) {
  3253. int mb_idx = BP_FW_MB_IDX(bp);
  3254. u32 drv_pulse;
  3255. u32 mcp_pulse;
  3256. ++bp->fw_drv_pulse_wr_seq;
  3257. bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
  3258. /* TBD - add SYSTEM_TIME */
  3259. drv_pulse = bp->fw_drv_pulse_wr_seq;
  3260. SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
  3261. mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
  3262. MCP_PULSE_SEQ_MASK);
  3263. /* The delta between driver pulse and mcp response
  3264. * should be 1 (before mcp response) or 0 (after mcp response)
  3265. */
  3266. if ((drv_pulse != mcp_pulse) &&
  3267. (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
  3268. /* someone lost a heartbeat... */
  3269. BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
  3270. drv_pulse, mcp_pulse);
  3271. }
  3272. }
  3273. if (bp->state == BNX2X_STATE_OPEN)
  3274. bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
  3275. timer_restart:
  3276. mod_timer(&bp->timer, jiffies + bp->current_interval);
  3277. }
  3278. /* end of Statistics */
  3279. /* nic init */
  3280. /*
  3281. * nic init service functions
  3282. */
  3283. static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
  3284. {
  3285. u32 i;
  3286. if (!(len%4) && !(addr%4))
  3287. for (i = 0; i < len; i += 4)
  3288. REG_WR(bp, addr + i, fill);
  3289. else
  3290. for (i = 0; i < len; i++)
  3291. REG_WR8(bp, addr + i, fill);
  3292. }
  3293. /* helper: writes FP SP data to FW - data_size in dwords */
  3294. static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
  3295. int fw_sb_id,
  3296. u32 *sb_data_p,
  3297. u32 data_size)
  3298. {
  3299. int index;
  3300. for (index = 0; index < data_size; index++)
  3301. REG_WR(bp, BAR_CSTRORM_INTMEM +
  3302. CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
  3303. sizeof(u32)*index,
  3304. *(sb_data_p + index));
  3305. }
  3306. static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
  3307. {
  3308. u32 *sb_data_p;
  3309. u32 data_size = 0;
  3310. struct hc_status_block_data_e2 sb_data_e2;
  3311. struct hc_status_block_data_e1x sb_data_e1x;
  3312. /* disable the function first */
  3313. if (CHIP_IS_E2(bp)) {
  3314. memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
  3315. sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
  3316. sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
  3317. sb_data_e2.common.p_func.vf_valid = false;
  3318. sb_data_p = (u32 *)&sb_data_e2;
  3319. data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
  3320. } else {
  3321. memset(&sb_data_e1x, 0,
  3322. sizeof(struct hc_status_block_data_e1x));
  3323. sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
  3324. sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
  3325. sb_data_e1x.common.p_func.vf_valid = false;
  3326. sb_data_p = (u32 *)&sb_data_e1x;
  3327. data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
  3328. }
  3329. bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
  3330. bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
  3331. CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
  3332. CSTORM_STATUS_BLOCK_SIZE);
  3333. bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
  3334. CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
  3335. CSTORM_SYNC_BLOCK_SIZE);
  3336. }
  3337. /* helper: writes SP SB data to FW */
  3338. static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
  3339. struct hc_sp_status_block_data *sp_sb_data)
  3340. {
  3341. int func = BP_FUNC(bp);
  3342. int i;
  3343. for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
  3344. REG_WR(bp, BAR_CSTRORM_INTMEM +
  3345. CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
  3346. i*sizeof(u32),
  3347. *((u32 *)sp_sb_data + i));
  3348. }
  3349. static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
  3350. {
  3351. int func = BP_FUNC(bp);
  3352. struct hc_sp_status_block_data sp_sb_data;
  3353. memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
  3354. sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
  3355. sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
  3356. sp_sb_data.p_func.vf_valid = false;
  3357. bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
  3358. bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
  3359. CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
  3360. CSTORM_SP_STATUS_BLOCK_SIZE);
  3361. bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
  3362. CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
  3363. CSTORM_SP_SYNC_BLOCK_SIZE);
  3364. }
  3365. static inline
  3366. void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
  3367. int igu_sb_id, int igu_seg_id)
  3368. {
  3369. hc_sm->igu_sb_id = igu_sb_id;
  3370. hc_sm->igu_seg_id = igu_seg_id;
  3371. hc_sm->timer_value = 0xFF;
  3372. hc_sm->time_to_expire = 0xFFFFFFFF;
  3373. }
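/* init a non-default status block: build the chip-specific SB data (host SB address, function/VF ids, RX/TX state machines) and write it to FW */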
  3374. static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
  3375. u8 vf_valid, int fw_sb_id, int igu_sb_id)
  3376. {
  3377. int igu_seg_id;
  3378. struct hc_status_block_data_e2 sb_data_e2;
  3379. struct hc_status_block_data_e1x sb_data_e1x;
  3380. struct hc_status_block_sm *hc_sm_p;
  3381. struct hc_index_data *hc_index_p;
  3382. int data_size;
  3383. u32 *sb_data_p;
  3384. if (CHIP_INT_MODE_IS_BC(bp))
  3385. igu_seg_id = HC_SEG_ACCESS_NORM;
  3386. else
  3387. igu_seg_id = IGU_SEG_ACCESS_NORM;
  3388. bnx2x_zero_fp_sb(bp, fw_sb_id);
  3389. if (CHIP_IS_E2(bp)) {
  3390. memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
  3391. sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
  3392. sb_data_e2.common.p_func.vf_id = vfid;
  3393. sb_data_e2.common.p_func.vf_valid = vf_valid;
  3394. sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
  3395. sb_data_e2.common.same_igu_sb_1b = true;
  3396. sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
  3397. sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
  3398. hc_sm_p = sb_data_e2.common.state_machine;
  3399. hc_index_p = sb_data_e2.index_data;
  3400. sb_data_p = (u32 *)&sb_data_e2;
  3401. data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
  3402. } else {
  3403. memset(&sb_data_e1x, 0,
  3404. sizeof(struct hc_status_block_data_e1x));
  3405. sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
  3406. sb_data_e1x.common.p_func.vf_id = 0xff;
  3407. sb_data_e1x.common.p_func.vf_valid = false;
  3408. sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
  3409. sb_data_e1x.common.same_igu_sb_1b = true;
  3410. sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
  3411. sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
  3412. hc_sm_p = sb_data_e1x.common.state_machine;
  3413. hc_index_p = sb_data_e1x.index_data;
  3414. sb_data_p = (u32 *)&sb_data_e1x;
  3415. data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
  3416. }
  3417. bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
  3418. igu_sb_id, igu_seg_id);
  3419. bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
  3420. igu_sb_id, igu_seg_id);
  3421. DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3422. /* write indices to HW */
  3423. bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
  3424. }
  3425. static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
  3426. u8 sb_index, u8 disable, u16 usec)
  3427. {
  3428. int port = BP_PORT(bp);
  3429. u8 ticks = usec / BNX2X_BTR;
  3430. storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
  3431. disable = disable ? 1 : (usec ? 0 : 1);
  3432. storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
  3433. }
  3434. static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
  3435. u16 tx_usec, u16 rx_usec)
  3436. {
  3437. bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
  3438. false, rx_usec);
  3439. bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
  3440. false, tx_usec);
  3441. }
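/* init the default (slow-path) status block: collect the AEU attention group signals, program the attention message address and write the SP SB data to FW */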
  3442. static void bnx2x_init_def_sb(struct bnx2x *bp)
  3443. {
  3444. struct host_sp_status_block *def_sb = bp->def_status_blk;
  3445. dma_addr_t mapping = bp->def_status_blk_mapping;
  3446. int igu_sp_sb_index;
  3447. int igu_seg_id;
  3448. int port = BP_PORT(bp);
  3449. int func = BP_FUNC(bp);
  3450. int reg_offset;
  3451. u64 section;
  3452. int index;
  3453. struct hc_sp_status_block_data sp_sb_data;
  3454. memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
  3455. if (CHIP_INT_MODE_IS_BC(bp)) {
  3456. igu_sp_sb_index = DEF_SB_IGU_ID;
  3457. igu_seg_id = HC_SEG_ACCESS_DEF;
  3458. } else {
  3459. igu_sp_sb_index = bp->igu_dsb_id;
  3460. igu_seg_id = IGU_SEG_ACCESS_DEF;
  3461. }
  3462. /* ATTN */
  3463. section = ((u64)mapping) + offsetof(struct host_sp_status_block,
  3464. atten_status_block);
  3465. def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
  3466. bp->attn_state = 0;
  3467. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  3468. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  3469. for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
  3470. int sindex;
  3471. /* take care of sig[0]..sig[4] */
  3472. for (sindex = 0; sindex < 4; sindex++)
  3473. bp->attn_group[index].sig[sindex] =
  3474. REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
  3475. if (CHIP_IS_E2(bp))
  3476. /*
  3477. * enable5 is separate from the rest of the registers,
  3478. * and therefore the address skip is 4
  3479. * and not 16 between the different groups
  3480. */
  3481. bp->attn_group[index].sig[4] = REG_RD(bp,
  3482. reg_offset + 0x10 + 0x4*index);
  3483. else
  3484. bp->attn_group[index].sig[4] = 0;
  3485. }
  3486. if (bp->common.int_block == INT_BLOCK_HC) {
  3487. reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
  3488. HC_REG_ATTN_MSG0_ADDR_L);
  3489. REG_WR(bp, reg_offset, U64_LO(section));
  3490. REG_WR(bp, reg_offset + 4, U64_HI(section));
  3491. } else if (CHIP_IS_E2(bp)) {
  3492. REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
  3493. REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
  3494. }
  3495. section = ((u64)mapping) + offsetof(struct host_sp_status_block,
  3496. sp_sb);
  3497. bnx2x_zero_sp_sb(bp);
  3498. sp_sb_data.host_sb_addr.lo = U64_LO(section);
  3499. sp_sb_data.host_sb_addr.hi = U64_HI(section);
  3500. sp_sb_data.igu_sb_id = igu_sp_sb_index;
  3501. sp_sb_data.igu_seg_id = igu_seg_id;
  3502. sp_sb_data.p_func.pf_id = func;
  3503. sp_sb_data.p_func.vnic_id = BP_VN(bp);
  3504. sp_sb_data.p_func.vf_id = 0xff;
  3505. bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
  3506. bp->stats_pending = 0;
  3507. bp->set_mac_pending = 0;
  3508. bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
  3509. }
  3510. void bnx2x_update_coalesce(struct bnx2x *bp)
  3511. {
  3512. int i;
  3513. for_each_eth_queue(bp, i)
  3514. bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
  3515. bp->tx_ticks, bp->rx_ticks);
  3516. }
  3517. static void bnx2x_init_sp_ring(struct bnx2x *bp)
  3518. {
  3519. spin_lock_init(&bp->spq_lock);
  3520. atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
  3521. bp->spq_prod_idx = 0;
  3522. bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
  3523. bp->spq_prod_bd = bp->spq;
  3524. bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
  3525. }
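/* link the event queue pages via their next-page pointers (the last element of each page points to the following page, wrapping to the first) and reset the EQ producer/consumer indices */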
  3526. static void bnx2x_init_eq_ring(struct bnx2x *bp)
  3527. {
  3528. int i;
  3529. for (i = 1; i <= NUM_EQ_PAGES; i++) {
  3530. union event_ring_elem *elem =
  3531. &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
  3532. elem->next_page.addr.hi =
  3533. cpu_to_le32(U64_HI(bp->eq_mapping +
  3534. BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
  3535. elem->next_page.addr.lo =
  3536. cpu_to_le32(U64_LO(bp->eq_mapping +
  3537. BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
  3538. }
  3539. bp->eq_cons = 0;
  3540. bp->eq_prod = NUM_EQ_DESC;
  3541. bp->eq_cons_sb = BNX2X_EQ_INDEX;
3542. /* we want a warning message before it gets rough... */
  3543. atomic_set(&bp->eq_spq_left,
  3544. min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
  3545. }
  3546. void bnx2x_push_indir_table(struct bnx2x *bp)
  3547. {
  3548. int func = BP_FUNC(bp);
  3549. int i;
  3550. if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
  3551. return;
  3552. for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
  3553. REG_WR8(bp, BAR_TSTRORM_INTMEM +
  3554. TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
  3555. bp->fp->cl_id + bp->rx_indir_table[i]);
  3556. }
  3557. static void bnx2x_init_ind_table(struct bnx2x *bp)
  3558. {
  3559. int i;
  3560. for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
  3561. bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
  3562. bnx2x_push_indir_table(bp);
  3563. }
  3564. void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
  3565. {
  3566. int mode = bp->rx_mode;
  3567. int port = BP_PORT(bp);
  3568. u16 cl_id;
  3569. u32 def_q_filters = 0;
  3570. /* All but management unicast packets should pass to the host as well */
  3571. u32 llh_mask =
  3572. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
  3573. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
  3574. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
  3575. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
  3576. switch (mode) {
  3577. case BNX2X_RX_MODE_NONE: /* no Rx */
  3578. def_q_filters = BNX2X_ACCEPT_NONE;
  3579. #ifdef BCM_CNIC
  3580. if (!NO_FCOE(bp)) {
  3581. cl_id = bnx2x_fcoe(bp, cl_id);
  3582. bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
  3583. }
  3584. #endif
  3585. break;
  3586. case BNX2X_RX_MODE_NORMAL:
  3587. def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
  3588. BNX2X_ACCEPT_MULTICAST;
  3589. #ifdef BCM_CNIC
  3590. if (!NO_FCOE(bp)) {
  3591. cl_id = bnx2x_fcoe(bp, cl_id);
  3592. bnx2x_rxq_set_mac_filters(bp, cl_id,
  3593. BNX2X_ACCEPT_UNICAST |
  3594. BNX2X_ACCEPT_MULTICAST);
  3595. }
  3596. #endif
  3597. break;
  3598. case BNX2X_RX_MODE_ALLMULTI:
  3599. def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
  3600. BNX2X_ACCEPT_ALL_MULTICAST;
  3601. #ifdef BCM_CNIC
  3602. /*
  3603. * Prevent duplication of multicast packets by configuring FCoE
  3604. * L2 Client to receive only matched unicast frames.
  3605. */
  3606. if (!NO_FCOE(bp)) {
  3607. cl_id = bnx2x_fcoe(bp, cl_id);
  3608. bnx2x_rxq_set_mac_filters(bp, cl_id,
  3609. BNX2X_ACCEPT_UNICAST);
  3610. }
  3611. #endif
  3612. break;
  3613. case BNX2X_RX_MODE_PROMISC:
  3614. def_q_filters |= BNX2X_PROMISCUOUS_MODE;
  3615. #ifdef BCM_CNIC
  3616. /*
3617. * Prevent packet duplication by configuring DROP_ALL for the FCoE
  3618. * L2 Client.
  3619. */
  3620. if (!NO_FCOE(bp)) {
  3621. cl_id = bnx2x_fcoe(bp, cl_id);
  3622. bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
  3623. }
  3624. #endif
  3625. /* pass management unicast packets as well */
  3626. llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
  3627. break;
  3628. default:
  3629. BNX2X_ERR("BAD rx mode (%d)\n", mode);
  3630. break;
  3631. }
  3632. cl_id = BP_L_ID(bp);
  3633. bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
  3634. REG_WR(bp,
  3635. (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
  3636. NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
  3637. DP(NETIF_MSG_IFUP, "rx mode %d\n"
  3638. "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
  3639. "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
  3640. "unmatched_ucast 0x%x\n", mode,
  3641. bp->mac_filters.ucast_drop_all,
  3642. bp->mac_filters.mcast_drop_all,
  3643. bp->mac_filters.bcast_drop_all,
  3644. bp->mac_filters.ucast_accept_all,
  3645. bp->mac_filters.mcast_accept_all,
  3646. bp->mac_filters.bcast_accept_all,
  3647. bp->mac_filters.unmatched_unicast
  3648. );
  3649. storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
  3650. }
  3651. static void bnx2x_init_internal_common(struct bnx2x *bp)
  3652. {
  3653. int i;
  3654. if (!CHIP_IS_E1(bp)) {
  3655. /* xstorm needs to know whether to add ovlan to packets or not,
3656. * in switch-independent mode we'll write 0 here... */
  3657. REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
  3658. bp->mf_mode);
  3659. REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
  3660. bp->mf_mode);
  3661. REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
  3662. bp->mf_mode);
  3663. REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
  3664. bp->mf_mode);
  3665. }
  3666. if (IS_MF_SI(bp))
  3667. /*
  3668. * In switch independent mode, the TSTORM needs to accept
  3669. * packets that failed classification, since approximate match
  3670. * mac addresses aren't written to NIG LLH
  3671. */
  3672. REG_WR8(bp, BAR_TSTRORM_INTMEM +
  3673. TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
  3674. /* Zero this manually as its initialization is
  3675. currently missing in the initTool */
  3676. for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
  3677. REG_WR(bp, BAR_USTRORM_INTMEM +
  3678. USTORM_AGG_DATA_OFFSET + i * 4, 0);
  3679. if (CHIP_IS_E2(bp)) {
  3680. REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
  3681. CHIP_INT_MODE_IS_BC(bp) ?
  3682. HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
  3683. }
  3684. }
  3685. static void bnx2x_init_internal_port(struct bnx2x *bp)
  3686. {
  3687. /* port */
  3688. bnx2x_dcb_init_intmem_pfc(bp);
  3689. }
  3690. static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
  3691. {
  3692. switch (load_code) {
  3693. case FW_MSG_CODE_DRV_LOAD_COMMON:
  3694. case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
  3695. bnx2x_init_internal_common(bp);
  3696. /* no break */
  3697. case FW_MSG_CODE_DRV_LOAD_PORT:
  3698. bnx2x_init_internal_port(bp);
  3699. /* no break */
  3700. case FW_MSG_CODE_DRV_LOAD_FUNCTION:
  3701. /* internal memory per function is
  3702. initialized inside bnx2x_pf_init */
  3703. break;
  3704. default:
  3705. BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
  3706. break;
  3707. }
  3708. }
  3709. static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
  3710. {
  3711. struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
  3712. fp->state = BNX2X_FP_STATE_CLOSED;
  3713. fp->index = fp->cid = fp_idx;
  3714. fp->cl_id = BP_L_ID(bp) + fp_idx;
  3715. fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
  3716. fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
3717. /* qZone id equals the FW (per path) client id */
  3718. fp->cl_qzone_id = fp->cl_id +
  3719. BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
  3720. ETH_MAX_RX_CLIENTS_E1H);
  3721. /* init shortcut */
  3722. fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
  3723. USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
  3724. USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
3725. /* Set up SB indices */
  3726. fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
  3727. fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
  3728. DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
  3729. "cl_id %d fw_sb %d igu_sb %d\n",
  3730. fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
  3731. fp->igu_sb_id);
  3732. bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
  3733. fp->fw_sb_id, fp->igu_sb_id);
  3734. bnx2x_update_fpsb_idx(fp);
  3735. }
  3736. void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
  3737. {
  3738. int i;
  3739. for_each_eth_queue(bp, i)
  3740. bnx2x_init_fp_sb(bp, i);
  3741. #ifdef BCM_CNIC
  3742. if (!NO_FCOE(bp))
  3743. bnx2x_init_fcoe_fp(bp);
  3744. bnx2x_init_sb(bp, bp->cnic_sb_mapping,
  3745. BNX2X_VF_ID_INVALID, false,
  3746. CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
  3747. #endif
  3748. /* ensure status block indices were read */
  3749. rmb();
  3750. bnx2x_init_def_sb(bp);
  3751. bnx2x_update_dsb_idx(bp);
  3752. bnx2x_init_rx_rings(bp);
  3753. bnx2x_init_tx_rings(bp);
  3754. bnx2x_init_sp_ring(bp);
  3755. bnx2x_init_eq_ring(bp);
  3756. bnx2x_init_internal(bp, load_code);
  3757. bnx2x_pf_init(bp);
  3758. bnx2x_init_ind_table(bp);
  3759. bnx2x_stats_init(bp);
  3760. /* At this point, we are ready for interrupts */
  3761. atomic_set(&bp->intr_sem, 0);
  3762. /* flush all before enabling interrupts */
  3763. mb();
  3764. mmiowb();
  3765. bnx2x_int_enable(bp);
  3766. /* Check for SPIO5 */
  3767. bnx2x_attn_int_deasserted0(bp,
  3768. REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
  3769. AEU_INPUTS_ATTN_BITS_SPIO5);
  3770. }
  3771. /* end of nic init */
  3772. /*
  3773. * gzip service functions
  3774. */
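/* allocate the DMA-coherent buffer and zlib stream used for firmware decompression */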
  3775. static int bnx2x_gunzip_init(struct bnx2x *bp)
  3776. {
  3777. bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
  3778. &bp->gunzip_mapping, GFP_KERNEL);
  3779. if (bp->gunzip_buf == NULL)
  3780. goto gunzip_nomem1;
  3781. bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
  3782. if (bp->strm == NULL)
  3783. goto gunzip_nomem2;
  3784. bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
  3785. GFP_KERNEL);
  3786. if (bp->strm->workspace == NULL)
  3787. goto gunzip_nomem3;
  3788. return 0;
  3789. gunzip_nomem3:
  3790. kfree(bp->strm);
  3791. bp->strm = NULL;
  3792. gunzip_nomem2:
  3793. dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
  3794. bp->gunzip_mapping);
  3795. bp->gunzip_buf = NULL;
  3796. gunzip_nomem1:
  3797. netdev_err(bp->dev, "Cannot allocate firmware buffer for"
  3798. " un-compression\n");
  3799. return -ENOMEM;
  3800. }
  3801. static void bnx2x_gunzip_end(struct bnx2x *bp)
  3802. {
  3803. kfree(bp->strm->workspace);
  3804. kfree(bp->strm);
  3805. bp->strm = NULL;
  3806. if (bp->gunzip_buf) {
  3807. dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
  3808. bp->gunzip_mapping);
  3809. bp->gunzip_buf = NULL;
  3810. }
  3811. }
  3812. static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
  3813. {
  3814. int n, rc;
  3815. /* check gzip header */
  3816. if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
  3817. BNX2X_ERR("Bad gzip header\n");
  3818. return -EINVAL;
  3819. }
  3820. n = 10;
  3821. #define FNAME 0x8
  3822. if (zbuf[3] & FNAME)
  3823. while ((zbuf[n++] != 0) && (n < len));
  3824. bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
  3825. bp->strm->avail_in = len - n;
  3826. bp->strm->next_out = bp->gunzip_buf;
  3827. bp->strm->avail_out = FW_BUF_SIZE;
  3828. rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
  3829. if (rc != Z_OK)
  3830. return rc;
  3831. rc = zlib_inflate(bp->strm, Z_FINISH);
  3832. if ((rc != Z_OK) && (rc != Z_STREAM_END))
  3833. netdev_err(bp->dev, "Firmware decompression error: %s\n",
  3834. bp->strm->msg);
  3835. bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
  3836. if (bp->gunzip_outlen & 0x3)
  3837. netdev_err(bp->dev, "Firmware decompression error:"
  3838. " gunzip_outlen (%d) not aligned\n",
  3839. bp->gunzip_outlen);
  3840. bp->gunzip_outlen >>= 2;
  3841. zlib_inflateEnd(bp->strm);
  3842. if (rc == Z_STREAM_END)
  3843. return 0;
  3844. return rc;
  3845. }
  3846. /* nic load/unload */
  3847. /*
  3848. * General service functions
  3849. */
  3850. /* send a NIG loopback debug packet */
  3851. static void bnx2x_lb_pckt(struct bnx2x *bp)
  3852. {
  3853. u32 wb_write[3];
  3854. /* Ethernet source and destination addresses */
  3855. wb_write[0] = 0x55555555;
  3856. wb_write[1] = 0x55555555;
  3857. wb_write[2] = 0x20; /* SOP */
  3858. REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
  3859. /* NON-IP protocol */
  3860. wb_write[0] = 0x09000000;
  3861. wb_write[1] = 0x55555555;
  3862. wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
  3863. REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
  3864. }
  3865. /* some of the internal memories
3866. * are not directly readable from the driver;
  3867. * to test them we send debug packets
  3868. */
  3869. static int bnx2x_int_mem_test(struct bnx2x *bp)
  3870. {
  3871. int factor;
  3872. int count, i;
  3873. u32 val = 0;
  3874. if (CHIP_REV_IS_FPGA(bp))
  3875. factor = 120;
  3876. else if (CHIP_REV_IS_EMUL(bp))
  3877. factor = 200;
  3878. else
  3879. factor = 1;
  3880. /* Disable inputs of parser neighbor blocks */
  3881. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
  3882. REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
  3883. REG_WR(bp, CFC_REG_DEBUG0, 0x1);
  3884. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
  3885. /* Write 0 to parser credits for CFC search request */
  3886. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
  3887. /* send Ethernet packet */
  3888. bnx2x_lb_pckt(bp);
3889. /* TODO: do we need to reset the NIG statistics? */
  3890. /* Wait until NIG register shows 1 packet of size 0x10 */
  3891. count = 1000 * factor;
  3892. while (count) {
  3893. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  3894. val = *bnx2x_sp(bp, wb_data[0]);
  3895. if (val == 0x10)
  3896. break;
  3897. msleep(10);
  3898. count--;
  3899. }
  3900. if (val != 0x10) {
  3901. BNX2X_ERR("NIG timeout val = 0x%x\n", val);
  3902. return -1;
  3903. }
  3904. /* Wait until PRS register shows 1 packet */
  3905. count = 1000 * factor;
  3906. while (count) {
  3907. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  3908. if (val == 1)
  3909. break;
  3910. msleep(10);
  3911. count--;
  3912. }
  3913. if (val != 0x1) {
  3914. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  3915. return -2;
  3916. }
  3917. /* Reset and init BRB, PRS */
  3918. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
  3919. msleep(50);
  3920. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
  3921. msleep(50);
  3922. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  3923. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  3924. DP(NETIF_MSG_HW, "part2\n");
  3925. /* Disable inputs of parser neighbor blocks */
  3926. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
  3927. REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
  3928. REG_WR(bp, CFC_REG_DEBUG0, 0x1);
  3929. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
  3930. /* Write 0 to parser credits for CFC search request */
  3931. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
  3932. /* send 10 Ethernet packets */
  3933. for (i = 0; i < 10; i++)
  3934. bnx2x_lb_pckt(bp);
  3935. /* Wait until NIG register shows 10 + 1
  3936. packets of size 11*0x10 = 0xb0 */
  3937. count = 1000 * factor;
  3938. while (count) {
  3939. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  3940. val = *bnx2x_sp(bp, wb_data[0]);
  3941. if (val == 0xb0)
  3942. break;
  3943. msleep(10);
  3944. count--;
  3945. }
  3946. if (val != 0xb0) {
  3947. BNX2X_ERR("NIG timeout val = 0x%x\n", val);
  3948. return -3;
  3949. }
  3950. /* Wait until PRS register shows 2 packets */
  3951. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  3952. if (val != 2)
  3953. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  3954. /* Write 1 to parser credits for CFC search request */
  3955. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
  3956. /* Wait until PRS register shows 3 packets */
  3957. msleep(10 * factor);
  3958. /* Wait until NIG register shows 1 packet of size 0x10 */
  3959. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  3960. if (val != 3)
  3961. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  3962. /* clear NIG EOP FIFO */
  3963. for (i = 0; i < 11; i++)
  3964. REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
  3965. val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
  3966. if (val != 1) {
  3967. BNX2X_ERR("clear of NIG failed\n");
  3968. return -4;
  3969. }
  3970. /* Reset and init BRB, PRS, NIG */
  3971. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
  3972. msleep(50);
  3973. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
  3974. msleep(50);
  3975. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  3976. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  3977. #ifndef BCM_CNIC
  3978. /* set NIC mode */
  3979. REG_WR(bp, PRS_REG_NIC_MODE, 1);
  3980. #endif
  3981. /* Enable inputs of parser neighbor blocks */
  3982. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
  3983. REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
  3984. REG_WR(bp, CFC_REG_DEBUG0, 0x0);
  3985. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
  3986. DP(NETIF_MSG_HW, "done\n");
  3987. return 0; /* OK */
  3988. }
  3989. static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
  3990. {
  3991. REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
  3992. if (CHIP_IS_E2(bp))
  3993. REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
  3994. else
  3995. REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
  3996. REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
  3997. REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
  3998. /*
  3999. * mask read length error interrupts in brb for parser
  4000. * (parsing unit and 'checksum and crc' unit)
  4001. * these errors are legal (PU reads fixed length and CAC can cause
  4002. * read length error on truncated packets)
  4003. */
  4004. REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
  4005. REG_WR(bp, QM_REG_QM_INT_MASK, 0);
  4006. REG_WR(bp, TM_REG_TM_INT_MASK, 0);
  4007. REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
  4008. REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
  4009. REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
  4010. /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
  4011. /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
  4012. REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
  4013. REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
  4014. REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
  4015. /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
  4016. /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
  4017. REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
  4018. REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
  4019. REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
  4020. REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
  4021. /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
  4022. /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
  4023. if (CHIP_REV_IS_FPGA(bp))
  4024. REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
  4025. else if (CHIP_IS_E2(bp))
  4026. REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
  4027. (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
  4028. | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
  4029. | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
  4030. | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
  4031. | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
  4032. else
  4033. REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
  4034. REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
  4035. REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
  4036. REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
  4037. /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
  4038. /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
  4039. REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
  4040. REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
  4041. /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
  4042. REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
  4043. }
  4044. static void bnx2x_reset_common(struct bnx2x *bp)
  4045. {
  4046. /* reset_common */
  4047. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  4048. 0xd3ffff7f);
  4049. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
  4050. }
  4051. static void bnx2x_init_pxp(struct bnx2x *bp)
  4052. {
  4053. u16 devctl;
  4054. int r_order, w_order;
  4055. pci_read_config_word(bp->pdev,
  4056. bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
  4057. DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
  4058. w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
  4059. if (bp->mrrs == -1)
  4060. r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
  4061. else {
  4062. DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
  4063. r_order = bp->mrrs;
  4064. }
  4065. bnx2x_init_pxp_arb(bp, r_order, w_order);
  4066. }
  4067. static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
  4068. {
  4069. int is_required;
  4070. u32 val;
  4071. int port;
  4072. if (BP_NOMCP(bp))
  4073. return;
  4074. is_required = 0;
  4075. val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
  4076. SHARED_HW_CFG_FAN_FAILURE_MASK;
  4077. if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
  4078. is_required = 1;
  4079. /*
  4080. * The fan failure mechanism is usually related to the PHY type since
  4081. * the power consumption of the board is affected by the PHY. Currently,
4082. * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
  4083. */
  4084. else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
  4085. for (port = PORT_0; port < PORT_MAX; port++) {
  4086. is_required |=
  4087. bnx2x_fan_failure_det_req(
  4088. bp,
  4089. bp->common.shmem_base,
  4090. bp->common.shmem2_base,
  4091. port);
  4092. }
  4093. DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
  4094. if (is_required == 0)
  4095. return;
  4096. /* Fan failure is indicated by SPIO 5 */
  4097. bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
  4098. MISC_REGISTERS_SPIO_INPUT_HI_Z);
  4099. /* set to active low mode */
  4100. val = REG_RD(bp, MISC_REG_SPIO_INT);
  4101. val |= ((1 << MISC_REGISTERS_SPIO_5) <<
  4102. MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
  4103. REG_WR(bp, MISC_REG_SPIO_INT, val);
  4104. /* enable interrupt to signal the IGU */
  4105. val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
  4106. val |= (1 << MISC_REGISTERS_SPIO_5);
  4107. REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
  4108. }
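/* program the PGL pretend register so that subsequent GRC accesses from this function are issued on behalf of pretend_func_num; the read-back ensures the write has taken effect */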
  4109. static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
  4110. {
  4111. u32 offset = 0;
  4112. if (CHIP_IS_E1(bp))
  4113. return;
  4114. if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
  4115. return;
  4116. switch (BP_ABS_FUNC(bp)) {
  4117. case 0:
  4118. offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
  4119. break;
  4120. case 1:
  4121. offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
  4122. break;
  4123. case 2:
  4124. offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
  4125. break;
  4126. case 3:
  4127. offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
  4128. break;
  4129. case 4:
  4130. offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
  4131. break;
  4132. case 5:
  4133. offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
  4134. break;
  4135. case 6:
  4136. offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
  4137. break;
  4138. case 7:
  4139. offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
  4140. break;
  4141. default:
  4142. return;
  4143. }
  4144. REG_WR(bp, offset, pretend_func_num);
  4145. REG_RD(bp, offset);
  4146. DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
  4147. }
  4148. static void bnx2x_pf_disable(struct bnx2x *bp)
  4149. {
  4150. u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
  4151. val &= ~IGU_PF_CONF_FUNC_EN;
  4152. REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
  4153. REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
  4154. REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
  4155. }
  4156. static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
  4157. {
  4158. u32 val, i;
  4159. DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
  4160. bnx2x_reset_common(bp);
  4161. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
  4162. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
  4163. bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
  4164. if (!CHIP_IS_E1(bp))
  4165. REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
  4166. if (CHIP_IS_E2(bp)) {
  4167. u8 fid;
  4168. /**
4169. * In 4-port or 2-port mode we need to turn off master-enable
4170. * for everyone; after that, turn it back on for self.
4171. * So, regardless of multi-function mode, we always disable it
4172. * for all functions on the given path; this means 0,2,4,6 for
4173. * path 0 and 1,3,5,7 for path 1.
  4174. */
  4175. for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
  4176. if (fid == BP_ABS_FUNC(bp)) {
  4177. REG_WR(bp,
  4178. PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
  4179. 1);
  4180. continue;
  4181. }
  4182. bnx2x_pretend_func(bp, fid);
  4183. /* clear pf enable */
  4184. bnx2x_pf_disable(bp);
  4185. bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
  4186. }
  4187. }
  4188. bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
  4189. if (CHIP_IS_E1(bp)) {
  4190. /* enable HW interrupt from PXP on USDM overflow
  4191. bit 16 on INT_MASK_0 */
  4192. REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
  4193. }
  4194. bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
  4195. bnx2x_init_pxp(bp);
  4196. #ifdef __BIG_ENDIAN
  4197. REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
  4198. REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
  4199. REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
  4200. REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
  4201. REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
  4202. /* make sure this value is 0 */
  4203. REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
  4204. /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
  4205. REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
  4206. REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
  4207. REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
  4208. REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
  4209. #endif
  4210. bnx2x_ilt_init_page_size(bp, INITOP_SET);
  4211. if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
  4212. REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4213. /* let the HW do its magic ... */
  4214. msleep(100);
  4215. /* finish PXP init */
  4216. val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
  4217. if (val != 1) {
  4218. BNX2X_ERR("PXP2 CFG failed\n");
  4219. return -EBUSY;
  4220. }
  4221. val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
  4222. if (val != 1) {
  4223. BNX2X_ERR("PXP2 RD_INIT failed\n");
  4224. return -EBUSY;
  4225. }
  4226. /* Timers bug workaround E2 only. We need to set the entire ILT to
  4227. * have entries with value "0" and valid bit on.
  4228. * This needs to be done by the first PF that is loaded in a path
  4229. * (i.e. common phase)
  4230. */
  4231. if (CHIP_IS_E2(bp)) {
  4232. struct ilt_client_info ilt_cli;
  4233. struct bnx2x_ilt ilt;
  4234. memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
  4235. memset(&ilt, 0, sizeof(struct bnx2x_ilt));
  4236. /* initialize dummy TM client */
  4237. ilt_cli.start = 0;
  4238. ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
  4239. ilt_cli.client_num = ILT_CLIENT_TM;
  4240. /* Step 1: set zeroes to all ilt page entries with valid bit on
  4241. * Step 2: set the timers first/last ilt entry to point
  4242. * to the entire range to prevent ILT range error for 3rd/4th
4243. * vnic (this code assumes existence of the vnic)
4244. *
4245. * both steps are performed by a call to bnx2x_ilt_client_init_op()
4246. * with a dummy TM client
4247. *
4248. * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4249. * and its counterpart are split registers
  4250. */
  4251. bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
  4252. bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
  4253. bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
  4254. REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
  4255. REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
  4256. REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
  4257. }
  4258. REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
  4259. REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
  4260. if (CHIP_IS_E2(bp)) {
  4261. int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
  4262. (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
  4263. bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
  4264. bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4265. /* let the HW do its magic ... */
  4266. do {
  4267. msleep(200);
  4268. val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
  4269. } while (factor-- && (val != 1));
  4270. if (val != 1) {
  4271. BNX2X_ERR("ATC_INIT failed\n");
  4272. return -EBUSY;
  4273. }
  4274. }
  4275. bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
  4276. /* clean the DMAE memory */
  4277. bp->dmae_ready = 1;
  4278. bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
  4279. bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
  4280. bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
  4281. bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
  4282. bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
  4283. bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
  4284. bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
  4285. bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
  4286. bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
  4287. bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
  4288. if (CHIP_MODE_IS_4_PORT(bp))
  4289. bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
  4290. /* QM queues pointers table */
  4291. bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
  4292. /* soft reset pulse */
  4293. REG_WR(bp, QM_REG_SOFT_RESET, 1);
  4294. REG_WR(bp, QM_REG_SOFT_RESET, 0);
  4295. #ifdef BCM_CNIC
  4296. bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
  4297. #endif
  4298. bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
  4299. REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
  4300. if (!CHIP_REV_IS_SLOW(bp)) {
  4301. /* enable hw interrupt from doorbell Q */
  4302. REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
  4303. }
  4304. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  4305. if (CHIP_MODE_IS_4_PORT(bp)) {
  4306. REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
  4307. REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
  4308. }
  4309. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  4310. REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
  4311. #ifndef BCM_CNIC
  4312. /* set NIC mode */
  4313. REG_WR(bp, PRS_REG_NIC_MODE, 1);
  4314. #endif
  4315. if (!CHIP_IS_E1(bp))
  4316. REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
  4317. if (CHIP_IS_E2(bp)) {
  4318. /* Bit-map indicating which L2 hdrs may appear after the
  4319. basic Ethernet header */
  4320. int has_ovlan = IS_MF_SD(bp);
  4321. REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
  4322. REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
  4323. }
  4324. bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
  4325. bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
  4326. bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
  4327. bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
  4328. bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  4329. bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  4330. bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  4331. bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  4332. bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
  4333. bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
  4334. bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
  4335. bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
  4336. if (CHIP_MODE_IS_4_PORT(bp))
  4337. bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
  4338. /* sync semi rtc */
  4339. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  4340. 0x80000000);
  4341. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
  4342. 0x80000000);
  4343. bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
  4344. bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
  4345. bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
  4346. if (CHIP_IS_E2(bp)) {
  4347. int has_ovlan = IS_MF_SD(bp);
  4348. REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
  4349. REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
  4350. }
  4351. REG_WR(bp, SRC_REG_SOFT_RST, 1);
  4352. for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
  4353. REG_WR(bp, i, random32());
  4354. bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
  4355. #ifdef BCM_CNIC
  4356. REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
  4357. REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
  4358. REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
  4359. REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
  4360. REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
  4361. REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
  4362. REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
  4363. REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
  4364. REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
  4365. REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
  4366. #endif
  4367. REG_WR(bp, SRC_REG_SOFT_RST, 0);
  4368. if (sizeof(union cdu_context) != 1024)
  4369. /* we currently assume that a context is 1024 bytes */
  4370. dev_alert(&bp->pdev->dev, "please adjust the size "
  4371. "of cdu_context(%ld)\n",
  4372. (long)sizeof(union cdu_context));
  4373. bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
  4374. val = (4 << 24) + (0 << 12) + 1024;
  4375. REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
  4376. bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
  4377. REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
  4378. /* enable context validation interrupt from CFC */
  4379. REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
  4380. /* set the thresholds to prevent CFC/CDU race */
  4381. REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
  4382. bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
  4383. if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
  4384. REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
  4385. bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
  4386. bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
  4387. bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
  4388. /* Reset PCIE errors for debug */
  4389. REG_WR(bp, 0x2814, 0xffffffff);
  4390. REG_WR(bp, 0x3820, 0xffffffff);
  4391. if (CHIP_IS_E2(bp)) {
  4392. REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
  4393. (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
  4394. PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
  4395. REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
  4396. (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
  4397. PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
  4398. PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
  4399. REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
  4400. (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
  4401. PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
  4402. PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
  4403. }
  4404. bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
  4405. bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
  4406. bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
  4407. bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
  4408. bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
  4409. if (!CHIP_IS_E1(bp)) {
  4410. REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
  4411. REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
  4412. }
  4413. if (CHIP_IS_E2(bp)) {
  4414. /* Bit-map indicating which L2 hdrs may appear after the
  4415. basic Ethernet header */
  4416. REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
  4417. }
  4418. if (CHIP_REV_IS_SLOW(bp))
  4419. msleep(200);
  4420. /* finish CFC init */
  4421. val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
  4422. if (val != 1) {
  4423. BNX2X_ERR("CFC LL_INIT failed\n");
  4424. return -EBUSY;
  4425. }
  4426. val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
  4427. if (val != 1) {
  4428. BNX2X_ERR("CFC AC_INIT failed\n");
  4429. return -EBUSY;
  4430. }
  4431. val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
  4432. if (val != 1) {
  4433. BNX2X_ERR("CFC CAM_INIT failed\n");
  4434. return -EBUSY;
  4435. }
  4436. REG_WR(bp, CFC_REG_DEBUG0, 0);
  4437. if (CHIP_IS_E1(bp)) {
  4438. /* read NIG statistic
  4439. to see if this is our first up since powerup */
  4440. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  4441. val = *bnx2x_sp(bp, wb_data[0]);
  4442. /* do internal memory self test */
  4443. if ((val == 0) && bnx2x_int_mem_test(bp)) {
  4444. BNX2X_ERR("internal mem self test failed\n");
  4445. return -EBUSY;
  4446. }
  4447. }
  4448. bnx2x_setup_fan_failure_detection(bp);
  4449. /* clear PXP2 attentions */
  4450. REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
  4451. bnx2x_enable_blocks_attention(bp);
  4452. if (CHIP_PARITY_ENABLED(bp))
  4453. bnx2x_enable_blocks_parity(bp);
  4454. if (!BP_NOMCP(bp)) {
  4455. /* In E2 2-PORT mode, same ext phy is used for the two paths */
  4456. if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
  4457. CHIP_IS_E1x(bp)) {
  4458. u32 shmem_base[2], shmem2_base[2];
  4459. shmem_base[0] = bp->common.shmem_base;
  4460. shmem2_base[0] = bp->common.shmem2_base;
  4461. if (CHIP_IS_E2(bp)) {
  4462. shmem_base[1] =
  4463. SHMEM2_RD(bp, other_shmem_base_addr);
  4464. shmem2_base[1] =
  4465. SHMEM2_RD(bp, other_shmem2_base_addr);
  4466. }
  4467. bnx2x_acquire_phy_lock(bp);
  4468. bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
  4469. bp->common.chip_id);
  4470. bnx2x_release_phy_lock(bp);
  4471. }
  4472. } else
  4473. BNX2X_ERR("Bootcode is missing - can not initialize link\n");
  4474. return 0;
  4475. }
  4476. static int bnx2x_init_hw_port(struct bnx2x *bp)
  4477. {
  4478. int port = BP_PORT(bp);
  4479. int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
  4480. u32 low, high;
  4481. u32 val;
  4482. DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
  4483. REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
  4484. bnx2x_init_block(bp, PXP_BLOCK, init_stage);
  4485. bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4486. /* Timers bug workaround: the pf_master bit in pglue is disabled in the
4487. * common phase, so we need to enable it here before any DMAE access is
4488. * attempted. Therefore we manually add the enable-master to the
4489. * port phase (it also happens in the function phase)
  4490. */
  4491. if (CHIP_IS_E2(bp))
  4492. REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
  4493. bnx2x_init_block(bp, TCM_BLOCK, init_stage);
  4494. bnx2x_init_block(bp, UCM_BLOCK, init_stage);
  4495. bnx2x_init_block(bp, CCM_BLOCK, init_stage);
  4496. bnx2x_init_block(bp, XCM_BLOCK, init_stage);
  4497. /* QM cid (connection) count */
  4498. bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
  4499. #ifdef BCM_CNIC
  4500. bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
  4501. REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
  4502. REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
  4503. #endif
  4504. bnx2x_init_block(bp, DQ_BLOCK, init_stage);
  4505. if (CHIP_MODE_IS_4_PORT(bp))
  4506. bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
  4507. if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
  4508. bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
  4509. if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
  4510. /* no pause for emulation and FPGA */
  4511. low = 0;
  4512. high = 513;
  4513. } else {
  4514. if (IS_MF(bp))
  4515. low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
  4516. else if (bp->dev->mtu > 4096) {
  4517. if (bp->flags & ONE_PORT_FLAG)
  4518. low = 160;
  4519. else {
  4520. val = bp->dev->mtu;
  4521. /* (24*1024 + val*4)/256 */
  4522. low = 96 + (val/64) +
  4523. ((val % 64) ? 1 : 0);
  4524. }
  4525. } else
  4526. low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
  4527. high = low + 56; /* 14*1024/256 */
  4528. }
  4529. REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
  4530. REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
  4531. }
  4532. if (CHIP_MODE_IS_4_PORT(bp)) {
  4533. REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
  4534. REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
  4535. REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
  4536. BRB1_REG_MAC_GUARANTIED_0), 40);
  4537. }
  4538. bnx2x_init_block(bp, PRS_BLOCK, init_stage);
  4539. bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
  4540. bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
  4541. bnx2x_init_block(bp, USDM_BLOCK, init_stage);
  4542. bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
  4543. bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
  4544. bnx2x_init_block(bp, USEM_BLOCK, init_stage);
  4545. bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
  4546. bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
  4547. if (CHIP_MODE_IS_4_PORT(bp))
  4548. bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
  4549. bnx2x_init_block(bp, UPB_BLOCK, init_stage);
  4550. bnx2x_init_block(bp, XPB_BLOCK, init_stage);
  4551. bnx2x_init_block(bp, PBF_BLOCK, init_stage);
  4552. if (!CHIP_IS_E2(bp)) {
  4553. /* configure PBF to work without PAUSE mtu 9000 */
  4554. REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
  4555. /* update threshold */
  4556. REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
  4557. /* update init credit */
  4558. REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
  4559. /* probe changes */
  4560. REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
  4561. udelay(50);
  4562. REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
  4563. }
  4564. #ifdef BCM_CNIC
  4565. bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
  4566. #endif
  4567. bnx2x_init_block(bp, CDU_BLOCK, init_stage);
  4568. bnx2x_init_block(bp, CFC_BLOCK, init_stage);
  4569. if (CHIP_IS_E1(bp)) {
  4570. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  4571. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  4572. }
  4573. bnx2x_init_block(bp, HC_BLOCK, init_stage);
  4574. bnx2x_init_block(bp, IGU_BLOCK, init_stage);
  4575. bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
  4576. /* init aeu_mask_attn_func_0/1:
  4577. * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
  4578. * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
  4579. * bits 4-7 are used for "per vn group attention" */
  4580. val = IS_MF(bp) ? 0xF7 : 0x7;
  4581. /* Enable DCBX attention for all but E1 */
  4582. val |= CHIP_IS_E1(bp) ? 0 : 0x10;
  4583. REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
  4584. bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
  4585. bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
  4586. bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
  4587. bnx2x_init_block(bp, DBU_BLOCK, init_stage);
  4588. bnx2x_init_block(bp, DBG_BLOCK, init_stage);
  4589. bnx2x_init_block(bp, NIG_BLOCK, init_stage);
  4590. REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
  4591. if (!CHIP_IS_E1(bp)) {
  4592. /* 0x2 disable mf_ov, 0x1 enable */
  4593. REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
  4594. (IS_MF_SD(bp) ? 0x1 : 0x2));
  4595. if (CHIP_IS_E2(bp)) {
  4596. val = 0;
  4597. switch (bp->mf_mode) {
  4598. case MULTI_FUNCTION_SD:
  4599. val = 1;
  4600. break;
  4601. case MULTI_FUNCTION_SI:
  4602. val = 2;
  4603. break;
  4604. }
  4605. REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
  4606. NIG_REG_LLH0_CLS_TYPE), val);
  4607. }
  4608. {
  4609. REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
  4610. REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
  4611. REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
  4612. }
  4613. }
  4614. bnx2x_init_block(bp, MCP_BLOCK, init_stage);
  4615. bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
  4616. if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
  4617. bp->common.shmem2_base, port)) {
  4618. u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  4619. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  4620. val = REG_RD(bp, reg_addr);
  4621. val |= AEU_INPUTS_ATTN_BITS_SPIO5;
  4622. REG_WR(bp, reg_addr, val);
  4623. }
  4624. bnx2x__link_reset(bp);
  4625. return 0;
  4626. }
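/* write a single ILT entry: E1 uses the original on-chip address table, later chips use the B0 table */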
  4627. static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
  4628. {
  4629. int reg;
  4630. if (CHIP_IS_E1(bp))
  4631. reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
  4632. else
  4633. reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
  4634. bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
  4635. }
  4636. static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
  4637. {
  4638. bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
  4639. }
  4640. static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
  4641. {
  4642. u32 i, base = FUNC_ILT_BASE(func);
  4643. for (i = base; i < base + ILT_PER_FUNC; i++)
  4644. bnx2x_ilt_wr(bp, i, 0);
  4645. }
  4646. static int bnx2x_init_hw_func(struct bnx2x *bp)
  4647. {
  4648. int port = BP_PORT(bp);
  4649. int func = BP_FUNC(bp);
  4650. struct bnx2x_ilt *ilt = BP_ILT(bp);
  4651. u16 cdu_ilt_start;
  4652. u32 addr, val;
  4653. u32 main_mem_base, main_mem_size, main_mem_prty_clr;
  4654. int i, main_mem_width;
  4655. DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
  4656. /* set MSI reconfigure capability */
  4657. if (bp->common.int_block == INT_BLOCK_HC) {
  4658. addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
  4659. val = REG_RD(bp, addr);
  4660. val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
  4661. REG_WR(bp, addr, val);
  4662. }
  4663. ilt = BP_ILT(bp);
  4664. cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
  4665. for (i = 0; i < L2_ILT_LINES(bp); i++) {
  4666. ilt->lines[cdu_ilt_start + i].page =
  4667. bp->context.vcxt + (ILT_PAGE_CIDS * i);
  4668. ilt->lines[cdu_ilt_start + i].page_mapping =
  4669. bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
  4670. /* cdu ilt pages are allocated manually so there's no need to
  4671. set the size */
  4672. }
  4673. bnx2x_ilt_init_op(bp, INITOP_SET);
  4674. #ifdef BCM_CNIC
  4675. bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
  4676. /* T1 hash bits value determines the T1 number of entries */
  4677. REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
  4678. #endif
  4679. #ifndef BCM_CNIC
  4680. /* set NIC mode */
  4681. REG_WR(bp, PRS_REG_NIC_MODE, 1);
  4682. #endif /* BCM_CNIC */
  4683. if (CHIP_IS_E2(bp)) {
  4684. u32 pf_conf = IGU_PF_CONF_FUNC_EN;
  4685. /* Turn on a single ISR mode in IGU if driver is going to use
  4686. * INT#x or MSI
  4687. */
  4688. if (!(bp->flags & USING_MSIX_FLAG))
  4689. pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
  4690. /*
4691. * Timers bug workaround: function init part.
4692. * We need to wait 20msec after initializing the ILT
4693. * to make sure there are no requests in
4694. * one of the PXP internal queues with "old" ILT addresses
  4695. */
  4696. msleep(20);
  4697. /*
  4698. * Master enable - Due to WB DMAE writes performed before this
  4699. * register is re-initialized as part of the regular function
  4700. * init
  4701. */
  4702. REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
  4703. /* Enable the function in IGU */
  4704. REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
  4705. }
  4706. bp->dmae_ready = 1;
  4707. bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
  4708. if (CHIP_IS_E2(bp))
  4709. REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
  4710. bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
  4711. bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
  4712. bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
  4713. bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
  4714. bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
  4715. bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
  4716. bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
  4717. bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
  4718. bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
  4719. if (CHIP_IS_E2(bp)) {
  4720. REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
  4721. BP_PATH(bp));
  4722. REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
  4723. BP_PATH(bp));
  4724. }
  4725. if (CHIP_MODE_IS_4_PORT(bp))
  4726. bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
  4727. if (CHIP_IS_E2(bp))
  4728. REG_WR(bp, QM_REG_PF_EN, 1);
  4729. bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
  4730. if (CHIP_MODE_IS_4_PORT(bp))
  4731. bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
  4732. bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
  4733. bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
  4734. bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
  4735. bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
  4736. bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
  4737. bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
  4738. bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
  4739. bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
  4740. bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
  4741. bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
  4742. bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
  4743. if (CHIP_IS_E2(bp))
  4744. REG_WR(bp, PBF_REG_DISABLE_PF, 0);
  4745. bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
  4746. bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
  4747. if (CHIP_IS_E2(bp))
  4748. REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
  4749. if (IS_MF(bp)) {
  4750. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
  4751. REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
  4752. }
  4753. bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
  4754. /* HC init per function */
  4755. if (bp->common.int_block == INT_BLOCK_HC) {
  4756. if (CHIP_IS_E1H(bp)) {
  4757. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
  4758. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  4759. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  4760. }
  4761. bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
  4762. } else {
  4763. int num_segs, sb_idx, prod_offset;
  4764. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
  4765. if (CHIP_IS_E2(bp)) {
  4766. REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
  4767. REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
  4768. }
  4769. bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
  4770. if (CHIP_IS_E2(bp)) {
  4771. int dsb_idx = 0;
  4772. /**
  4773. * Producer memory:
  4774. * E2 mode: address 0-135 match to the mapping memory;
  4775. * 136 - PF0 default prod; 137 - PF1 default prod;
  4776. * 138 - PF2 default prod; 139 - PF3 default prod;
  4777. * 140 - PF0 attn prod; 141 - PF1 attn prod;
  4778. * 142 - PF2 attn prod; 143 - PF3 attn prod;
  4779. * 144-147 reserved.
  4780. *
  4781. * E1.5 mode - In backward compatible mode;
  4782. * for non default SB; each even line in the memory
4783. * holds the U producer and each odd line holds
  4784. * the C producer. The first 128 producers are for
  4785. * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
  4786. * producers are for the DSB for each PF.
  4787. * Each PF has five segments: (the order inside each
  4788. * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
  4789. * 132-135 C prods; 136-139 X prods; 140-143 T prods;
  4790. * 144-147 attn prods;
  4791. */
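/*
 * Example (normal E2 mode, per the table above): PF2's default
 * producer sits on line 138 and its attention producer on line 142,
 * so the corresponding locations are
 * IGU_REG_PROD_CONS_MEMORY + 138 * 4 and
 * IGU_REG_PROD_CONS_MEMORY + 142 * 4.
 */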
  4792. /* non-default-status-blocks */
  4793. num_segs = CHIP_INT_MODE_IS_BC(bp) ?
  4794. IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
  4795. for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
  4796. prod_offset = (bp->igu_base_sb + sb_idx) *
  4797. num_segs;
  4798. for (i = 0; i < num_segs; i++) {
  4799. addr = IGU_REG_PROD_CONS_MEMORY +
  4800. (prod_offset + i) * 4;
  4801. REG_WR(bp, addr, 0);
  4802. }
  4803. /* send consumer update with value 0 */
  4804. bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
  4805. USTORM_ID, 0, IGU_INT_NOP, 1);
  4806. bnx2x_igu_clear_sb(bp,
  4807. bp->igu_base_sb + sb_idx);
  4808. }
  4809. /* default-status-blocks */
  4810. num_segs = CHIP_INT_MODE_IS_BC(bp) ?
  4811. IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
  4812. if (CHIP_MODE_IS_4_PORT(bp))
  4813. dsb_idx = BP_FUNC(bp);
  4814. else
  4815. dsb_idx = BP_E1HVN(bp);
  4816. prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
  4817. IGU_BC_BASE_DSB_PROD + dsb_idx :
  4818. IGU_NORM_BASE_DSB_PROD + dsb_idx);
  4819. for (i = 0; i < (num_segs * E1HVN_MAX);
  4820. i += E1HVN_MAX) {
  4821. addr = IGU_REG_PROD_CONS_MEMORY +
  4822. (prod_offset + i)*4;
  4823. REG_WR(bp, addr, 0);
  4824. }
  4825. /* send consumer update with 0 */
  4826. if (CHIP_INT_MODE_IS_BC(bp)) {
  4827. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4828. USTORM_ID, 0, IGU_INT_NOP, 1);
  4829. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4830. CSTORM_ID, 0, IGU_INT_NOP, 1);
  4831. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4832. XSTORM_ID, 0, IGU_INT_NOP, 1);
  4833. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4834. TSTORM_ID, 0, IGU_INT_NOP, 1);
  4835. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4836. ATTENTION_ID, 0, IGU_INT_NOP, 1);
  4837. } else {
  4838. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4839. USTORM_ID, 0, IGU_INT_NOP, 1);
  4840. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4841. ATTENTION_ID, 0, IGU_INT_NOP, 1);
  4842. }
  4843. bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
  4844. /* !!! these should become driver const once
  4845. rf-tool supports split-68 const */
  4846. REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
  4847. REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
  4848. REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
  4849. REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
  4850. REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
  4851. REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
  4852. }
  4853. }
  4854. /* Reset PCIE errors for debug */
  4855. REG_WR(bp, 0x2114, 0xffffffff);
  4856. REG_WR(bp, 0x2120, 0xffffffff);
  4857. bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
  4858. bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
  4859. bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
  4860. bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
  4861. bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
  4862. bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
  4863. if (CHIP_IS_E1x(bp)) {
  4864. main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
  4865. main_mem_base = HC_REG_MAIN_MEMORY +
  4866. BP_PORT(bp) * (main_mem_size * 4);
  4867. main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
  4868. main_mem_width = 8;
  4869. val = REG_RD(bp, main_mem_prty_clr);
  4870. if (val)
4871. DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC block "
4872. "during function init "
4873. "(0x%x)!\n", val);
  4874. /* Clear "false" parity errors in MSI-X table */
  4875. for (i = main_mem_base;
  4876. i < main_mem_base + main_mem_size * 4;
  4877. i += main_mem_width) {
  4878. bnx2x_read_dmae(bp, i, main_mem_width / 4);
  4879. bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
  4880. i, main_mem_width / 4);
  4881. }
  4882. /* Clear HC parity attention */
  4883. REG_RD(bp, main_mem_prty_clr);
  4884. }
  4885. bnx2x_phy_probe(&bp->link_params);
  4886. return 0;
  4887. }
  4888. int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
  4889. {
  4890. int rc = 0;
  4891. DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
  4892. BP_ABS_FUNC(bp), load_code);
  4893. bp->dmae_ready = 0;
  4894. spin_lock_init(&bp->dmae_lock);
  4895. rc = bnx2x_gunzip_init(bp);
  4896. if (rc)
  4897. return rc;
  4898. switch (load_code) {
  4899. case FW_MSG_CODE_DRV_LOAD_COMMON:
  4900. case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
  4901. rc = bnx2x_init_hw_common(bp, load_code);
  4902. if (rc)
  4903. goto init_hw_err;
  4904. /* no break */
  4905. case FW_MSG_CODE_DRV_LOAD_PORT:
  4906. rc = bnx2x_init_hw_port(bp);
  4907. if (rc)
  4908. goto init_hw_err;
  4909. /* no break */
  4910. case FW_MSG_CODE_DRV_LOAD_FUNCTION:
  4911. rc = bnx2x_init_hw_func(bp);
  4912. if (rc)
  4913. goto init_hw_err;
  4914. break;
  4915. default:
  4916. BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
  4917. break;
  4918. }
  4919. if (!BP_NOMCP(bp)) {
  4920. int mb_idx = BP_FW_MB_IDX(bp);
  4921. bp->fw_drv_pulse_wr_seq =
  4922. (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
  4923. DRV_PULSE_SEQ_MASK);
  4924. DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
  4925. }
  4926. init_hw_err:
  4927. bnx2x_gunzip_end(bp);
  4928. return rc;
  4929. }
  4930. void bnx2x_free_mem(struct bnx2x *bp)
  4931. {
  4932. #define BNX2X_PCI_FREE(x, y, size) \
  4933. do { \
  4934. if (x) { \
  4935. dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
  4936. x = NULL; \
  4937. y = 0; \
  4938. } \
  4939. } while (0)
  4940. #define BNX2X_FREE(x) \
  4941. do { \
  4942. if (x) { \
  4943. kfree((void *)x); \
  4944. x = NULL; \
  4945. } \
  4946. } while (0)
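/* Both helpers above NULL the pointer (and zero the mapping) after
 * freeing, so bnx2x_free_mem() stays safe if it is reached with a
 * partially allocated bp, e.g. from the alloc_mem_err path in
 * bnx2x_alloc_mem().
 */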
  4947. int i;
  4948. /* fastpath */
  4949. /* Common */
  4950. for_each_queue(bp, i) {
  4951. #ifdef BCM_CNIC
  4952. /* FCoE client uses default status block */
  4953. if (IS_FCOE_IDX(i)) {
  4954. union host_hc_status_block *sb =
  4955. &bnx2x_fp(bp, i, status_blk);
  4956. memset(sb, 0, sizeof(union host_hc_status_block));
  4957. bnx2x_fp(bp, i, status_blk_mapping) = 0;
  4958. } else {
  4959. #endif
  4960. /* status blocks */
  4961. if (CHIP_IS_E2(bp))
  4962. BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
  4963. bnx2x_fp(bp, i, status_blk_mapping),
  4964. sizeof(struct host_hc_status_block_e2));
  4965. else
  4966. BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
  4967. bnx2x_fp(bp, i, status_blk_mapping),
  4968. sizeof(struct host_hc_status_block_e1x));
  4969. #ifdef BCM_CNIC
  4970. }
  4971. #endif
  4972. }
  4973. /* Rx */
  4974. for_each_rx_queue(bp, i) {
  4975. /* fastpath rx rings: rx_buf rx_desc rx_comp */
  4976. BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
  4977. BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
  4978. bnx2x_fp(bp, i, rx_desc_mapping),
  4979. sizeof(struct eth_rx_bd) * NUM_RX_BD);
  4980. BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
  4981. bnx2x_fp(bp, i, rx_comp_mapping),
  4982. sizeof(struct eth_fast_path_rx_cqe) *
  4983. NUM_RCQ_BD);
  4984. /* SGE ring */
  4985. BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
  4986. BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
  4987. bnx2x_fp(bp, i, rx_sge_mapping),
  4988. BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
  4989. }
  4990. /* Tx */
  4991. for_each_tx_queue(bp, i) {
  4992. /* fastpath tx rings: tx_buf tx_desc */
  4993. BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
  4994. BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
  4995. bnx2x_fp(bp, i, tx_desc_mapping),
  4996. sizeof(union eth_tx_bd_types) * NUM_TX_BD);
  4997. }
  4998. /* end of fastpath */
  4999. BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
  5000. sizeof(struct host_sp_status_block));
  5001. BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
  5002. sizeof(struct bnx2x_slowpath));
  5003. BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
  5004. bp->context.size);
  5005. bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
  5006. BNX2X_FREE(bp->ilt->lines);
  5007. #ifdef BCM_CNIC
  5008. if (CHIP_IS_E2(bp))
  5009. BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
  5010. sizeof(struct host_hc_status_block_e2));
  5011. else
  5012. BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
  5013. sizeof(struct host_hc_status_block_e1x));
  5014. BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
  5015. #endif
  5016. BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
  5017. BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
  5018. BCM_PAGE_SIZE * NUM_EQ_PAGES);
  5019. BNX2X_FREE(bp->rx_indir_table);
  5020. #undef BNX2X_PCI_FREE
5021. #undef BNX2X_FREE
  5022. }
  5023. static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
  5024. {
  5025. union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
  5026. if (CHIP_IS_E2(bp)) {
  5027. bnx2x_fp(bp, index, sb_index_values) =
  5028. (__le16 *)status_blk.e2_sb->sb.index_values;
  5029. bnx2x_fp(bp, index, sb_running_index) =
  5030. (__le16 *)status_blk.e2_sb->sb.running_index;
  5031. } else {
  5032. bnx2x_fp(bp, index, sb_index_values) =
  5033. (__le16 *)status_blk.e1x_sb->sb.index_values;
  5034. bnx2x_fp(bp, index, sb_running_index) =
  5035. (__le16 *)status_blk.e1x_sb->sb.running_index;
  5036. }
  5037. }
  5038. int bnx2x_alloc_mem(struct bnx2x *bp)
  5039. {
  5040. #define BNX2X_PCI_ALLOC(x, y, size) \
  5041. do { \
  5042. x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
  5043. if (x == NULL) \
  5044. goto alloc_mem_err; \
  5045. memset(x, 0, size); \
  5046. } while (0)
  5047. #define BNX2X_ALLOC(x, size) \
  5048. do { \
  5049. x = kzalloc(size, GFP_KERNEL); \
  5050. if (x == NULL) \
  5051. goto alloc_mem_err; \
  5052. } while (0)
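/* On any allocation failure both helpers above jump to alloc_mem_err,
 * which calls bnx2x_free_mem() to release whatever was already
 * allocated and returns -ENOMEM.
 */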
  5053. int i;
  5054. /* fastpath */
  5055. /* Common */
  5056. for_each_queue(bp, i) {
  5057. union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
  5058. bnx2x_fp(bp, i, bp) = bp;
  5059. /* status blocks */
  5060. #ifdef BCM_CNIC
  5061. if (!IS_FCOE_IDX(i)) {
  5062. #endif
  5063. if (CHIP_IS_E2(bp))
  5064. BNX2X_PCI_ALLOC(sb->e2_sb,
  5065. &bnx2x_fp(bp, i, status_blk_mapping),
  5066. sizeof(struct host_hc_status_block_e2));
  5067. else
  5068. BNX2X_PCI_ALLOC(sb->e1x_sb,
  5069. &bnx2x_fp(bp, i, status_blk_mapping),
  5070. sizeof(struct host_hc_status_block_e1x));
  5071. #ifdef BCM_CNIC
  5072. }
  5073. #endif
  5074. set_sb_shortcuts(bp, i);
  5075. }
  5076. /* Rx */
  5077. for_each_queue(bp, i) {
  5078. /* fastpath rx rings: rx_buf rx_desc rx_comp */
  5079. BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
  5080. sizeof(struct sw_rx_bd) * NUM_RX_BD);
  5081. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
  5082. &bnx2x_fp(bp, i, rx_desc_mapping),
  5083. sizeof(struct eth_rx_bd) * NUM_RX_BD);
  5084. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
  5085. &bnx2x_fp(bp, i, rx_comp_mapping),
  5086. sizeof(struct eth_fast_path_rx_cqe) *
  5087. NUM_RCQ_BD);
  5088. /* SGE ring */
  5089. BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
  5090. sizeof(struct sw_rx_page) * NUM_RX_SGE);
  5091. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
  5092. &bnx2x_fp(bp, i, rx_sge_mapping),
  5093. BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
  5094. }
  5095. /* Tx */
  5096. for_each_queue(bp, i) {
  5097. /* fastpath tx rings: tx_buf tx_desc */
  5098. BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
  5099. sizeof(struct sw_tx_bd) * NUM_TX_BD);
  5100. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
  5101. &bnx2x_fp(bp, i, tx_desc_mapping),
  5102. sizeof(union eth_tx_bd_types) * NUM_TX_BD);
  5103. }
  5104. /* end of fastpath */
  5105. #ifdef BCM_CNIC
  5106. if (CHIP_IS_E2(bp))
  5107. BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
  5108. sizeof(struct host_hc_status_block_e2));
  5109. else
  5110. BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
  5111. sizeof(struct host_hc_status_block_e1x));
  5112. /* allocate searcher T2 table */
  5113. BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
  5114. #endif
  5115. BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
  5116. sizeof(struct host_sp_status_block));
  5117. BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
  5118. sizeof(struct bnx2x_slowpath));
  5119. bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
  5120. BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
  5121. bp->context.size);
  5122. BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
  5123. if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
  5124. goto alloc_mem_err;
  5125. /* Slow path ring */
  5126. BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
  5127. /* EQ */
  5128. BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
  5129. BCM_PAGE_SIZE * NUM_EQ_PAGES);
  5130. BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
  5131. TSTORM_INDIRECTION_TABLE_SIZE);
  5132. return 0;
  5133. alloc_mem_err:
  5134. bnx2x_free_mem(bp);
  5135. return -ENOMEM;
  5136. #undef BNX2X_PCI_ALLOC
  5137. #undef BNX2X_ALLOC
  5138. }
  5139. /*
  5140. * Init service functions
  5141. */
  5142. static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
  5143. int *state_p, int flags);
  5144. int bnx2x_func_start(struct bnx2x *bp)
  5145. {
  5146. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
  5147. /* Wait for completion */
  5148. return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
  5149. WAIT_RAMROD_COMMON);
  5150. }
  5151. static int bnx2x_func_stop(struct bnx2x *bp)
  5152. {
  5153. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
  5154. /* Wait for completion */
  5155. return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
  5156. 0, &(bp->state), WAIT_RAMROD_COMMON);
  5157. }
  5158. /**
  5159. * Sets a MAC in a CAM for a few L2 Clients for E1x chips
  5160. *
  5161. * @param bp driver descriptor
  5162. * @param set set or clear an entry (1 or 0)
  5163. * @param mac pointer to a buffer containing a MAC
  5164. * @param cl_bit_vec bit vector of clients to register a MAC for
  5165. * @param cam_offset offset in a CAM to use
  5166. * @param is_bcast is the set MAC a broadcast address (for E1 only)
  5167. */
  5168. static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
  5169. u32 cl_bit_vec, u8 cam_offset,
  5170. u8 is_bcast)
  5171. {
  5172. struct mac_configuration_cmd *config =
  5173. (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
  5174. int ramrod_flags = WAIT_RAMROD_COMMON;
  5175. bp->set_mac_pending = 1;
  5176. config->hdr.length = 1;
  5177. config->hdr.offset = cam_offset;
  5178. config->hdr.client_id = 0xff;
5179. /* Mark this as a single MAC configuration ramrod (as opposed to a
5180. * UC/MC list configuration).
  5181. */
  5182. config->hdr.echo = 1;
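/* The MAC below is stored as three big-endian 16-bit words; e.g. on a
 * little-endian host 00:11:22:33:44:55 becomes msb 0x0011,
 * middle 0x2233 and lsb 0x4455.
 */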
  5183. /* primary MAC */
  5184. config->config_table[0].msb_mac_addr =
  5185. swab16(*(u16 *)&mac[0]);
  5186. config->config_table[0].middle_mac_addr =
  5187. swab16(*(u16 *)&mac[2]);
  5188. config->config_table[0].lsb_mac_addr =
  5189. swab16(*(u16 *)&mac[4]);
  5190. config->config_table[0].clients_bit_vector =
  5191. cpu_to_le32(cl_bit_vec);
  5192. config->config_table[0].vlan_id = 0;
  5193. config->config_table[0].pf_id = BP_FUNC(bp);
  5194. if (set)
  5195. SET_FLAG(config->config_table[0].flags,
  5196. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5197. T_ETH_MAC_COMMAND_SET);
  5198. else
  5199. SET_FLAG(config->config_table[0].flags,
  5200. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5201. T_ETH_MAC_COMMAND_INVALIDATE);
  5202. if (is_bcast)
  5203. SET_FLAG(config->config_table[0].flags,
  5204. MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
  5205. DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
  5206. (set ? "setting" : "clearing"),
  5207. config->config_table[0].msb_mac_addr,
  5208. config->config_table[0].middle_mac_addr,
  5209. config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
  5210. mb();
  5211. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  5212. U64_HI(bnx2x_sp_mapping(bp, mac_config)),
  5213. U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
  5214. /* Wait for a completion */
  5215. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
  5216. }
  5217. static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
  5218. int *state_p, int flags)
  5219. {
  5220. /* can take a while if any port is running */
  5221. int cnt = 5000;
  5222. u8 poll = flags & WAIT_RAMROD_POLL;
  5223. u8 common = flags & WAIT_RAMROD_COMMON;
  5224. DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
  5225. poll ? "polling" : "waiting", state, idx);
  5226. might_sleep();
  5227. while (cnt--) {
  5228. if (poll) {
  5229. if (common)
  5230. bnx2x_eq_int(bp);
  5231. else {
  5232. bnx2x_rx_int(bp->fp, 10);
  5233. /* if index is different from 0
  5234. * the reply for some commands will
  5235. * be on the non default queue
  5236. */
  5237. if (idx)
  5238. bnx2x_rx_int(&bp->fp[idx], 10);
  5239. }
  5240. }
  5241. mb(); /* state is changed by bnx2x_sp_event() */
  5242. if (*state_p == state) {
  5243. #ifdef BNX2X_STOP_ON_ERROR
  5244. DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
  5245. #endif
  5246. return 0;
  5247. }
  5248. msleep(1);
  5249. if (bp->panic)
  5250. return -EIO;
  5251. }
  5252. /* timeout! */
  5253. BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
  5254. poll ? "polling" : "waiting", state, idx);
  5255. #ifdef BNX2X_STOP_ON_ERROR
  5256. bnx2x_panic();
  5257. #endif
  5258. return -EBUSY;
  5259. }
  5260. static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
  5261. {
  5262. if (CHIP_IS_E1H(bp))
  5263. return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
  5264. else if (CHIP_MODE_IS_4_PORT(bp))
  5265. return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
  5266. else
  5267. return E2_FUNC_MAX * rel_offset + BP_VN(bp);
  5268. }
  5269. /**
  5270. * LLH CAM line allocations: currently only iSCSI and ETH macs are
  5271. * relevant. In addition, current implementation is tuned for a
  5272. * single ETH MAC.
  5273. */
  5274. enum {
  5275. LLH_CAM_ISCSI_ETH_LINE = 0,
  5276. LLH_CAM_ETH_LINE,
  5277. LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
  5278. };
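/* Per the enum above, line 0 of the per-port LLH CAM is reserved for the
 * iSCSI MAC and line 1 for the ETH MAC; LLH_CAM_MAX_PF_LINE bounds the
 * indices accepted by bnx2x_set_mac_in_nig() below.
 */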
  5279. static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
  5280. int set,
  5281. unsigned char *dev_addr,
  5282. int index)
  5283. {
  5284. u32 wb_data[2];
  5285. u32 mem_offset, ena_offset, mem_index;
  5286. /**
  5287. * indexes mapping:
  5288. * 0..7 - goes to MEM
  5289. * 8..15 - goes to MEM2
  5290. */
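/* Example, assuming NIG_LLH_FUNC_MEM_MAX_OFFSET is 8 as the mapping
 * above implies: index 3 lands in LLH_FUNC_MEM line 3, while index 10
 * lands in LLH_FUNC_MEM2 line 2.
 */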
  5291. if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
  5292. return;
  5293. /* calculate memory start offset according to the mapping
  5294. * and index in the memory */
  5295. if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
  5296. mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
  5297. NIG_REG_LLH0_FUNC_MEM;
  5298. ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
  5299. NIG_REG_LLH0_FUNC_MEM_ENABLE;
  5300. mem_index = index;
  5301. } else {
  5302. mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
  5303. NIG_REG_P0_LLH_FUNC_MEM2;
  5304. ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
  5305. NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
  5306. mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
  5307. }
  5308. if (set) {
  5309. /* LLH_FUNC_MEM is a u64 WB register */
  5310. mem_offset += 8*mem_index;
  5311. wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
  5312. (dev_addr[4] << 8) | dev_addr[5]);
  5313. wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
  5314. REG_WR_DMAE(bp, mem_offset, wb_data, 2);
  5315. }
  5316. /* enable/disable the entry */
  5317. REG_WR(bp, ena_offset + 4*mem_index, set);
  5318. }
  5319. void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
  5320. {
  5321. u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
  5322. bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
  5323. /* networking MAC */
  5324. bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
  5325. (1 << bp->fp->cl_id), cam_offset , 0);
  5326. bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
  5327. if (CHIP_IS_E1(bp)) {
  5328. /* broadcast MAC */
  5329. static const u8 bcast[ETH_ALEN] = {
  5330. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
  5331. };
  5332. bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
  5333. }
  5334. }
  5335. static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
  5336. {
  5337. return CHIP_REV_IS_SLOW(bp) ?
  5338. (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
  5339. (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
  5340. }
  5341. /* set mc list, do not wait as wait implies sleep and
  5342. * set_rx_mode can be invoked from non-sleepable context.
  5343. *
  5344. * Instead we use the same ramrod data buffer each time we need
  5345. * to configure a list of addresses, and use the fact that the
  5346. * list of MACs is changed in an incremental way and that the
  5347. * function is called under the netif_addr_lock. A temporary
  5348. * inconsistent CAM configuration (possible in case of a very fast
  5349. * sequence of add/del/add on the host side) will shortly be
  5350. * restored by the handler of the last ramrod.
  5351. */
  5352. static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
  5353. {
  5354. int i = 0, old;
  5355. struct net_device *dev = bp->dev;
  5356. u8 offset = bnx2x_e1_cam_mc_offset(bp);
  5357. struct netdev_hw_addr *ha;
  5358. struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
  5359. dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
  5360. if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
  5361. return -EINVAL;
  5362. netdev_for_each_mc_addr(ha, dev) {
  5363. /* copy mac */
  5364. config_cmd->config_table[i].msb_mac_addr =
  5365. swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
  5366. config_cmd->config_table[i].middle_mac_addr =
  5367. swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
  5368. config_cmd->config_table[i].lsb_mac_addr =
  5369. swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
  5370. config_cmd->config_table[i].vlan_id = 0;
  5371. config_cmd->config_table[i].pf_id = BP_FUNC(bp);
  5372. config_cmd->config_table[i].clients_bit_vector =
  5373. cpu_to_le32(1 << BP_L_ID(bp));
  5374. SET_FLAG(config_cmd->config_table[i].flags,
  5375. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5376. T_ETH_MAC_COMMAND_SET);
  5377. DP(NETIF_MSG_IFUP,
  5378. "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
  5379. config_cmd->config_table[i].msb_mac_addr,
  5380. config_cmd->config_table[i].middle_mac_addr,
  5381. config_cmd->config_table[i].lsb_mac_addr);
  5382. i++;
  5383. }
  5384. old = config_cmd->hdr.length;
  5385. if (old > i) {
  5386. for (; i < old; i++) {
  5387. if (CAM_IS_INVALID(config_cmd->
  5388. config_table[i])) {
  5389. /* already invalidated */
  5390. break;
  5391. }
  5392. /* invalidate */
  5393. SET_FLAG(config_cmd->config_table[i].flags,
  5394. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5395. T_ETH_MAC_COMMAND_INVALIDATE);
  5396. }
  5397. }
  5398. wmb();
  5399. config_cmd->hdr.length = i;
  5400. config_cmd->hdr.offset = offset;
  5401. config_cmd->hdr.client_id = 0xff;
  5402. /* Mark that this ramrod doesn't use bp->set_mac_pending for
  5403. * synchronization.
  5404. */
  5405. config_cmd->hdr.echo = 0;
  5406. mb();
  5407. return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  5408. U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  5409. }
  5410. void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
  5411. {
  5412. int i;
  5413. struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
  5414. dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
  5415. int ramrod_flags = WAIT_RAMROD_COMMON;
  5416. u8 offset = bnx2x_e1_cam_mc_offset(bp);
  5417. for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
  5418. SET_FLAG(config_cmd->config_table[i].flags,
  5419. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5420. T_ETH_MAC_COMMAND_INVALIDATE);
  5421. wmb();
  5422. config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
  5423. config_cmd->hdr.offset = offset;
  5424. config_cmd->hdr.client_id = 0xff;
  5425. /* We'll wait for a completion this time... */
  5426. config_cmd->hdr.echo = 1;
  5427. bp->set_mac_pending = 1;
  5428. mb();
  5429. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  5430. U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  5431. /* Wait for a completion */
  5432. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
  5433. ramrod_flags);
  5434. }
  5435. /* Accept one or more multicasts */
  5436. static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
  5437. {
  5438. struct net_device *dev = bp->dev;
  5439. struct netdev_hw_addr *ha;
  5440. u32 mc_filter[MC_HASH_SIZE];
  5441. u32 crc, bit, regidx;
  5442. int i;
  5443. memset(mc_filter, 0, 4 * MC_HASH_SIZE);
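/* Each multicast MAC is hashed with CRC32c; the top CRC byte selects
 * one of 256 filter bits: e.g. a top byte of 0x4b (75) sets bit 11
 * (75 & 0x1f) of MC_HASH register 2 (75 >> 5).
 */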
  5444. netdev_for_each_mc_addr(ha, dev) {
  5445. DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
  5446. bnx2x_mc_addr(ha));
  5447. crc = crc32c_le(0, bnx2x_mc_addr(ha),
  5448. ETH_ALEN);
  5449. bit = (crc >> 24) & 0xff;
  5450. regidx = bit >> 5;
  5451. bit &= 0x1f;
  5452. mc_filter[regidx] |= (1 << bit);
  5453. }
  5454. for (i = 0; i < MC_HASH_SIZE; i++)
  5455. REG_WR(bp, MC_HASH_OFFSET(bp, i),
  5456. mc_filter[i]);
  5457. return 0;
  5458. }
  5459. void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
  5460. {
  5461. int i;
  5462. for (i = 0; i < MC_HASH_SIZE; i++)
  5463. REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
  5464. }
  5465. #ifdef BCM_CNIC
  5466. /**
5467. * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
5468. * MAC(s). This function will wait until the ramrod completion
5469. * returns.
5470. *
5471. * @param bp driver handle
5472. * @param set set or clear the CAM entry
5473. *
5474. * @return 0 if success, -ENODEV if the ramrod doesn't return.
  5475. */
  5476. static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
  5477. {
  5478. u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
  5479. bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
  5480. u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
  5481. BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
  5482. u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
  5483. u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
  5484. /* Send a SET_MAC ramrod */
  5485. bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
  5486. cam_offset, 0);
  5487. bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
  5488. return 0;
  5489. }
  5490. /**
5491. * Set FCoE L2 MAC(s) at the next entries in the CAM after the
5492. * ETH MAC(s). This function will wait until the ramrod
5493. * completion returns.
5494. *
5495. * @param bp driver handle
5496. * @param set set or clear the CAM entry
5497. *
5498. * @return 0 if success, -ENODEV if the ramrod doesn't return.
  5499. */
  5500. int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
  5501. {
  5502. u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
  5503. /**
  5504. * CAM allocation for E1H
  5505. * eth unicasts: by func number
  5506. * iscsi: by func number
  5507. * fip unicast: by func number
  5508. * fip multicast: by func number
  5509. */
  5510. bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
  5511. cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
  5512. return 0;
  5513. }
  5514. int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
  5515. {
  5516. u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
  5517. /**
  5518. * CAM allocation for E1H
  5519. * eth unicasts: by func number
  5520. * iscsi: by func number
  5521. * fip unicast: by func number
  5522. * fip multicast: by func number
  5523. */
  5524. bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
  5525. bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
  5526. return 0;
  5527. }
  5528. #endif
  5529. static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
  5530. struct bnx2x_client_init_params *params,
  5531. u8 activate,
  5532. struct client_init_ramrod_data *data)
  5533. {
  5534. /* Clear the buffer */
  5535. memset(data, 0, sizeof(*data));
  5536. /* general */
  5537. data->general.client_id = params->rxq_params.cl_id;
  5538. data->general.statistics_counter_id = params->rxq_params.stat_id;
  5539. data->general.statistics_en_flg =
  5540. (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
  5541. data->general.is_fcoe_flg =
  5542. (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
  5543. data->general.activate_flg = activate;
  5544. data->general.sp_client_id = params->rxq_params.spcl_id;
  5545. /* Rx data */
  5546. data->rx.tpa_en_flg =
  5547. (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
  5548. data->rx.vmqueue_mode_en_flg = 0;
  5549. data->rx.cache_line_alignment_log_size =
  5550. params->rxq_params.cache_line_log;
  5551. data->rx.enable_dynamic_hc =
  5552. (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
  5553. data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
  5554. data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
  5555. data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
  5556. /* We don't set drop flags */
  5557. data->rx.drop_ip_cs_err_flg = 0;
  5558. data->rx.drop_tcp_cs_err_flg = 0;
  5559. data->rx.drop_ttl0_flg = 0;
  5560. data->rx.drop_udp_cs_err_flg = 0;
  5561. data->rx.inner_vlan_removal_enable_flg =
  5562. (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
  5563. data->rx.outer_vlan_removal_enable_flg =
  5564. (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
  5565. data->rx.status_block_id = params->rxq_params.fw_sb_id;
  5566. data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
  5567. data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
  5568. data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
  5569. data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
  5570. data->rx.bd_page_base.lo =
  5571. cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
  5572. data->rx.bd_page_base.hi =
  5573. cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
  5574. data->rx.sge_page_base.lo =
  5575. cpu_to_le32(U64_LO(params->rxq_params.sge_map));
  5576. data->rx.sge_page_base.hi =
  5577. cpu_to_le32(U64_HI(params->rxq_params.sge_map));
  5578. data->rx.cqe_page_base.lo =
  5579. cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
  5580. data->rx.cqe_page_base.hi =
  5581. cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
  5582. data->rx.is_leading_rss =
  5583. (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
  5584. data->rx.is_approx_mcast = data->rx.is_leading_rss;
  5585. /* Tx data */
  5586. data->tx.enforce_security_flg = 0; /* VF specific */
  5587. data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
  5588. data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
  5589. data->tx.mtu = 0; /* VF specific */
  5590. data->tx.tx_bd_page_base.lo =
  5591. cpu_to_le32(U64_LO(params->txq_params.dscr_map));
  5592. data->tx.tx_bd_page_base.hi =
  5593. cpu_to_le32(U64_HI(params->txq_params.dscr_map));
  5594. /* flow control data */
  5595. data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
  5596. data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
  5597. data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
  5598. data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
  5599. data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
  5600. data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
  5601. data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
  5602. data->fc.safc_group_num = params->txq_params.cos;
  5603. data->fc.safc_group_en_flg =
  5604. (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
  5605. data->fc.traffic_type =
  5606. (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
  5607. LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
  5608. }
  5609. static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
  5610. {
  5611. /* ustorm cxt validation */
  5612. cxt->ustorm_ag_context.cdu_usage =
  5613. CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
  5614. ETH_CONNECTION_TYPE);
  5615. /* xcontext validation */
  5616. cxt->xstorm_ag_context.cdu_reserved =
  5617. CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
  5618. ETH_CONNECTION_TYPE);
  5619. }
  5620. static int bnx2x_setup_fw_client(struct bnx2x *bp,
  5621. struct bnx2x_client_init_params *params,
  5622. u8 activate,
  5623. struct client_init_ramrod_data *data,
  5624. dma_addr_t data_mapping)
  5625. {
  5626. u16 hc_usec;
  5627. int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
  5628. int ramrod_flags = 0, rc;
  5629. /* HC and context validation values */
  5630. hc_usec = params->txq_params.hc_rate ?
  5631. 1000000 / params->txq_params.hc_rate : 0;
  5632. bnx2x_update_coalesce_sb_index(bp,
  5633. params->txq_params.fw_sb_id,
  5634. params->txq_params.sb_cq_index,
  5635. !(params->txq_params.flags & QUEUE_FLG_HC),
  5636. hc_usec);
  5637. *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
  5638. hc_usec = params->rxq_params.hc_rate ?
  5639. 1000000 / params->rxq_params.hc_rate : 0;
  5640. bnx2x_update_coalesce_sb_index(bp,
  5641. params->rxq_params.fw_sb_id,
  5642. params->rxq_params.sb_cq_index,
  5643. !(params->rxq_params.flags & QUEUE_FLG_HC),
  5644. hc_usec);
  5645. bnx2x_set_ctx_validation(params->rxq_params.cxt,
  5646. params->rxq_params.cid);
  5647. /* zero stats */
  5648. if (params->txq_params.flags & QUEUE_FLG_STATS)
  5649. storm_memset_xstats_zero(bp, BP_PORT(bp),
  5650. params->txq_params.stat_id);
  5651. if (params->rxq_params.flags & QUEUE_FLG_STATS) {
  5652. storm_memset_ustats_zero(bp, BP_PORT(bp),
  5653. params->rxq_params.stat_id);
  5654. storm_memset_tstats_zero(bp, BP_PORT(bp),
  5655. params->rxq_params.stat_id);
  5656. }
  5657. /* Fill the ramrod data */
  5658. bnx2x_fill_cl_init_data(bp, params, activate, data);
  5659. /* SETUP ramrod.
  5660. *
5661. * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
  5662. * barrier except from mmiowb() is needed to impose a
  5663. * proper ordering of memory operations.
  5664. */
  5665. mmiowb();
  5666. bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
  5667. U64_HI(data_mapping), U64_LO(data_mapping), 0);
  5668. /* Wait for completion */
  5669. rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
  5670. params->ramrod_params.index,
  5671. params->ramrod_params.pstate,
  5672. ramrod_flags);
  5673. return rc;
  5674. }
  5675. /**
  5676. * Configure interrupt mode according to current configuration.
  5677. * In case of MSI-X it will also try to enable MSI-X.
  5678. *
  5679. * @param bp
  5680. *
  5681. * @return int
  5682. */
  5683. static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
  5684. {
  5685. int rc = 0;
  5686. switch (bp->int_mode) {
  5687. case INT_MODE_MSI:
  5688. bnx2x_enable_msi(bp);
  5689. /* falling through... */
  5690. case INT_MODE_INTx:
  5691. bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
  5692. DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
  5693. break;
  5694. default:
  5695. /* Set number of queues according to bp->multi_mode value */
  5696. bnx2x_set_num_queues(bp);
  5697. DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
  5698. bp->num_queues);
  5699. /* if we can't use MSI-X we only need one fp,
  5700. * so try to enable MSI-X with the requested number of fp's
5701. * and fall back to MSI or legacy INTx with one fp
  5702. */
  5703. rc = bnx2x_enable_msix(bp);
  5704. if (rc) {
  5705. /* failed to enable MSI-X */
  5706. if (bp->multi_mode)
  5707. DP(NETIF_MSG_IFUP,
  5708. "Multi requested but failed to "
  5709. "enable MSI-X (%d), "
  5710. "set number of queues to %d\n",
  5711. bp->num_queues,
  5712. 1 + NONE_ETH_CONTEXT_USE);
  5713. bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
  5714. if (!(bp->flags & DISABLE_MSI_FLAG))
  5715. bnx2x_enable_msi(bp);
  5716. }
  5717. break;
  5718. }
  5719. return rc;
  5720. }
5721. /* must be called prior to any HW initializations */
  5722. static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
  5723. {
  5724. return L2_ILT_LINES(bp);
  5725. }
  5726. void bnx2x_ilt_set_info(struct bnx2x *bp)
  5727. {
  5728. struct ilt_client_info *ilt_client;
  5729. struct bnx2x_ilt *ilt = BP_ILT(bp);
  5730. u16 line = 0;
  5731. ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
  5732. DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
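/* Each ILT client (CDU, QM, SRC, TM) is assigned a contiguous range of
 * lines starting at this function's ILT base; 'line' below tracks the
 * next free line as the clients are laid out in order.
 */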
  5733. /* CDU */
  5734. ilt_client = &ilt->clients[ILT_CLIENT_CDU];
  5735. ilt_client->client_num = ILT_CLIENT_CDU;
  5736. ilt_client->page_size = CDU_ILT_PAGE_SZ;
  5737. ilt_client->flags = ILT_CLIENT_SKIP_MEM;
  5738. ilt_client->start = line;
  5739. line += L2_ILT_LINES(bp);
  5740. #ifdef BCM_CNIC
  5741. line += CNIC_ILT_LINES;
  5742. #endif
  5743. ilt_client->end = line - 1;
  5744. DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
  5745. "flags 0x%x, hw psz %d\n",
  5746. ilt_client->start,
  5747. ilt_client->end,
  5748. ilt_client->page_size,
  5749. ilt_client->flags,
  5750. ilog2(ilt_client->page_size >> 12));
  5751. /* QM */
  5752. if (QM_INIT(bp->qm_cid_count)) {
  5753. ilt_client = &ilt->clients[ILT_CLIENT_QM];
  5754. ilt_client->client_num = ILT_CLIENT_QM;
  5755. ilt_client->page_size = QM_ILT_PAGE_SZ;
  5756. ilt_client->flags = 0;
  5757. ilt_client->start = line;
  5758. /* 4 bytes for each cid */
  5759. line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
  5760. QM_ILT_PAGE_SZ);
  5761. ilt_client->end = line - 1;
  5762. DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
  5763. "flags 0x%x, hw psz %d\n",
  5764. ilt_client->start,
  5765. ilt_client->end,
  5766. ilt_client->page_size,
  5767. ilt_client->flags,
  5768. ilog2(ilt_client->page_size >> 12));
  5769. }
  5770. /* SRC */
  5771. ilt_client = &ilt->clients[ILT_CLIENT_SRC];
  5772. #ifdef BCM_CNIC
  5773. ilt_client->client_num = ILT_CLIENT_SRC;
  5774. ilt_client->page_size = SRC_ILT_PAGE_SZ;
  5775. ilt_client->flags = 0;
  5776. ilt_client->start = line;
  5777. line += SRC_ILT_LINES;
  5778. ilt_client->end = line - 1;
  5779. DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
  5780. "flags 0x%x, hw psz %d\n",
  5781. ilt_client->start,
  5782. ilt_client->end,
  5783. ilt_client->page_size,
  5784. ilt_client->flags,
  5785. ilog2(ilt_client->page_size >> 12));
  5786. #else
  5787. ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
  5788. #endif
  5789. /* TM */
  5790. ilt_client = &ilt->clients[ILT_CLIENT_TM];
  5791. #ifdef BCM_CNIC
  5792. ilt_client->client_num = ILT_CLIENT_TM;
  5793. ilt_client->page_size = TM_ILT_PAGE_SZ;
  5794. ilt_client->flags = 0;
  5795. ilt_client->start = line;
  5796. line += TM_ILT_LINES;
  5797. ilt_client->end = line - 1;
  5798. DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
  5799. "flags 0x%x, hw psz %d\n",
  5800. ilt_client->start,
  5801. ilt_client->end,
  5802. ilt_client->page_size,
  5803. ilt_client->flags,
  5804. ilog2(ilt_client->page_size >> 12));
  5805. #else
  5806. ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
  5807. #endif
  5808. }
  5809. int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  5810. int is_leading)
  5811. {
  5812. struct bnx2x_client_init_params params = { {0} };
  5813. int rc;
5814. /* reset IGU state; skip this for the FCoE L2 queue */
  5815. if (!IS_FCOE_FP(fp))
  5816. bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
  5817. IGU_INT_ENABLE, 0);
  5818. params.ramrod_params.pstate = &fp->state;
  5819. params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
  5820. params.ramrod_params.index = fp->index;
  5821. params.ramrod_params.cid = fp->cid;
  5822. #ifdef BCM_CNIC
  5823. if (IS_FCOE_FP(fp))
  5824. params.ramrod_params.flags |= CLIENT_IS_FCOE;
  5825. #endif
  5826. if (is_leading)
  5827. params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
  5828. bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
  5829. bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
  5830. rc = bnx2x_setup_fw_client(bp, &params, 1,
  5831. bnx2x_sp(bp, client_init_data),
  5832. bnx2x_sp_mapping(bp, client_init_data));
  5833. return rc;
  5834. }
  5835. static int bnx2x_stop_fw_client(struct bnx2x *bp,
  5836. struct bnx2x_client_ramrod_params *p)
  5837. {
  5838. int rc;
  5839. int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
  5840. /* halt the connection */
  5841. *p->pstate = BNX2X_FP_STATE_HALTING;
  5842. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
  5843. p->cl_id, 0);
  5844. /* Wait for completion */
  5845. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
  5846. p->pstate, poll_flag);
  5847. if (rc) /* timeout */
  5848. return rc;
  5849. *p->pstate = BNX2X_FP_STATE_TERMINATING;
  5850. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
  5851. p->cl_id, 0);
  5852. /* Wait for completion */
  5853. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
  5854. p->pstate, poll_flag);
  5855. if (rc) /* timeout */
  5856. return rc;
  5857. /* delete cfc entry */
  5858. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
  5859. /* Wait for completion */
  5860. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
  5861. p->pstate, WAIT_RAMROD_COMMON);
  5862. return rc;
  5863. }
  5864. static int bnx2x_stop_client(struct bnx2x *bp, int index)
  5865. {
  5866. struct bnx2x_client_ramrod_params client_stop = {0};
  5867. struct bnx2x_fastpath *fp = &bp->fp[index];
  5868. client_stop.index = index;
  5869. client_stop.cid = fp->cid;
  5870. client_stop.cl_id = fp->cl_id;
  5871. client_stop.pstate = &(fp->state);
  5872. client_stop.poll = 0;
  5873. return bnx2x_stop_fw_client(bp, &client_stop);
  5874. }
  5875. static void bnx2x_reset_func(struct bnx2x *bp)
  5876. {
  5877. int port = BP_PORT(bp);
  5878. int func = BP_FUNC(bp);
  5879. int i;
  5880. int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
  5881. (CHIP_IS_E2(bp) ?
  5882. offsetof(struct hc_status_block_data_e2, common) :
  5883. offsetof(struct hc_status_block_data_e1x, common));
  5884. int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
  5885. int pfid_offset = offsetof(struct pci_entity, pf_id);
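/* The offsets above locate the pf_id field inside the status block data
 * in CSTORM memory, so the writes below can mark each SB as belonging to
 * a disabled function (HC_FUNCTION_DISABLED).
 */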
  5886. /* Disable the function in the FW */
  5887. REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
  5888. REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
  5889. REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
  5890. REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
  5891. /* FP SBs */
  5892. for_each_eth_queue(bp, i) {
  5893. struct bnx2x_fastpath *fp = &bp->fp[i];
  5894. REG_WR8(bp,
  5895. BAR_CSTRORM_INTMEM +
  5896. CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
  5897. + pfunc_offset_fp + pfid_offset,
  5898. HC_FUNCTION_DISABLED);
  5899. }
  5900. /* SP SB */
  5901. REG_WR8(bp,
  5902. BAR_CSTRORM_INTMEM +
  5903. CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
  5904. pfunc_offset_sp + pfid_offset,
  5905. HC_FUNCTION_DISABLED);
  5906. for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
5907. REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) +
5908. i * 4, 0);
  5909. /* Configure IGU */
  5910. if (bp->common.int_block == INT_BLOCK_HC) {
  5911. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  5912. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  5913. } else {
  5914. REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
  5915. REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
  5916. }
  5917. #ifdef BCM_CNIC
  5918. /* Disable Timer scan */
  5919. REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
  5920. /*
5921. * Wait for at least 10ms and up to 2 seconds for the timers scan to
  5922. * complete
  5923. */
  5924. for (i = 0; i < 200; i++) {
  5925. msleep(10);
  5926. if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
  5927. break;
  5928. }
  5929. #endif
  5930. /* Clear ILT */
  5931. bnx2x_clear_func_ilt(bp, func);
5932. /* Timers workaround bug for E2: if this is vnic-3,
5933. * we need to set the entire ILT range for the TM (timers) client.
  5934. */
  5935. if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
  5936. struct ilt_client_info ilt_cli;
  5937. /* use dummy TM client */
  5938. memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
  5939. ilt_cli.start = 0;
  5940. ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
  5941. ilt_cli.client_num = ILT_CLIENT_TM;
  5942. bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
  5943. }
  5944. /* this assumes that reset_port() called before reset_func()*/
  5945. if (CHIP_IS_E2(bp))
  5946. bnx2x_pf_disable(bp);
  5947. bp->dmae_ready = 0;
  5948. }
  5949. static void bnx2x_reset_port(struct bnx2x *bp)
  5950. {
  5951. int port = BP_PORT(bp);
  5952. u32 val;
  5953. REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
  5954. /* Do not rcv packets to BRB */
  5955. REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
  5956. /* Do not direct rcv packets that are not for MCP to the BRB */
  5957. REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
  5958. NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
  5959. /* Configure AEU */
  5960. REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
  5961. msleep(100);
  5962. /* Check for BRB port occupancy */
  5963. val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
  5964. if (val)
  5965. DP(NETIF_MSG_IFDOWN,
  5966. "BRB1 is not empty %d blocks are occupied\n", val);
  5967. /* TODO: Close Doorbell port? */
  5968. }
  5969. static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
  5970. {
  5971. DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
  5972. BP_ABS_FUNC(bp), reset_code);
  5973. switch (reset_code) {
  5974. case FW_MSG_CODE_DRV_UNLOAD_COMMON:
  5975. bnx2x_reset_port(bp);
  5976. bnx2x_reset_func(bp);
  5977. bnx2x_reset_common(bp);
  5978. break;
  5979. case FW_MSG_CODE_DRV_UNLOAD_PORT:
  5980. bnx2x_reset_port(bp);
  5981. bnx2x_reset_func(bp);
  5982. break;
  5983. case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
  5984. bnx2x_reset_func(bp);
  5985. break;
  5986. default:
  5987. BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
  5988. break;
  5989. }
  5990. }
  5991. #ifdef BCM_CNIC
  5992. static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
  5993. {
  5994. if (bp->flags & FCOE_MACS_SET) {
  5995. if (!IS_MF_SD(bp))
  5996. bnx2x_set_fip_eth_mac_addr(bp, 0);
  5997. bnx2x_set_all_enode_macs(bp, 0);
  5998. bp->flags &= ~FCOE_MACS_SET;
  5999. }
  6000. }
  6001. #endif
  6002. void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
  6003. {
  6004. int port = BP_PORT(bp);
  6005. u32 reset_code = 0;
  6006. int i, cnt, rc;
  6007. /* Wait until tx fastpath tasks complete */
  6008. for_each_tx_queue(bp, i) {
  6009. struct bnx2x_fastpath *fp = &bp->fp[i];
  6010. cnt = 1000;
  6011. while (bnx2x_has_tx_work_unload(fp)) {
  6012. if (!cnt) {
  6013. BNX2X_ERR("timeout waiting for queue[%d]\n",
  6014. i);
  6015. #ifdef BNX2X_STOP_ON_ERROR
  6016. bnx2x_panic();
6017. return;
  6018. #else
  6019. break;
  6020. #endif
  6021. }
  6022. cnt--;
  6023. msleep(1);
  6024. }
  6025. }
  6026. /* Give HW time to discard old tx messages */
  6027. msleep(1);
  6028. bnx2x_set_eth_mac(bp, 0);
  6029. bnx2x_invalidate_uc_list(bp);
  6030. if (CHIP_IS_E1(bp))
  6031. bnx2x_invalidate_e1_mc_list(bp);
  6032. else {
  6033. bnx2x_invalidate_e1h_mc_list(bp);
  6034. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
  6035. }
  6036. #ifdef BCM_CNIC
  6037. bnx2x_del_fcoe_eth_macs(bp);
  6038. #endif
  6039. if (unload_mode == UNLOAD_NORMAL)
  6040. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
  6041. else if (bp->flags & NO_WOL_FLAG)
  6042. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
  6043. else if (bp->wol) {
  6044. u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
  6045. u8 *mac_addr = bp->dev->dev_addr;
  6046. u32 val;
  6047. /* The mac address is written to entries 1-4 to
  6048. preserve entry 0 which is used by the PMF */
  6049. u8 entry = (BP_E1HVN(bp) + 1)*8;
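/* Each EMAC MAC-match entry is a pair of 32-bit registers (8 bytes),
 * so e.g. VN 0 uses byte offset 8, i.e. entry 1.
 */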
  6050. val = (mac_addr[0] << 8) | mac_addr[1];
  6051. EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
  6052. val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
  6053. (mac_addr[4] << 8) | mac_addr[5];
  6054. EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
  6055. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
  6056. } else
  6057. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6058. /* Close multi and leading connections.
6059. Completions for ramrods are collected in a synchronous way */
  6060. for_each_queue(bp, i)
  6061. if (bnx2x_stop_client(bp, i))
  6062. #ifdef BNX2X_STOP_ON_ERROR
  6063. return;
  6064. #else
  6065. goto unload_error;
  6066. #endif
  6067. rc = bnx2x_func_stop(bp);
  6068. if (rc) {
  6069. BNX2X_ERR("Function stop failed!\n");
  6070. #ifdef BNX2X_STOP_ON_ERROR
  6071. return;
  6072. #else
  6073. goto unload_error;
  6074. #endif
  6075. }
  6076. #ifndef BNX2X_STOP_ON_ERROR
  6077. unload_error:
  6078. #endif
  6079. if (!BP_NOMCP(bp))
  6080. reset_code = bnx2x_fw_command(bp, reset_code, 0);
  6081. else {
  6082. DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
  6083. "%d, %d, %d\n", BP_PATH(bp),
  6084. load_count[BP_PATH(bp)][0],
  6085. load_count[BP_PATH(bp)][1],
  6086. load_count[BP_PATH(bp)][2]);
  6087. load_count[BP_PATH(bp)][0]--;
  6088. load_count[BP_PATH(bp)][1 + port]--;
  6089. DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
  6090. "%d, %d, %d\n", BP_PATH(bp),
  6091. load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
  6092. load_count[BP_PATH(bp)][2]);
  6093. if (load_count[BP_PATH(bp)][0] == 0)
  6094. reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
  6095. else if (load_count[BP_PATH(bp)][1 + port] == 0)
  6096. reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
  6097. else
  6098. reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
  6099. }
  6100. if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
  6101. (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
  6102. bnx2x__link_reset(bp);
  6103. /* Disable HW interrupts, NAPI */
  6104. bnx2x_netif_stop(bp, 1);
  6105. /* Release IRQs */
  6106. bnx2x_free_irq(bp);
  6107. /* Reset the chip */
  6108. bnx2x_reset_chip(bp, reset_code);
  6109. /* Report UNLOAD_DONE to MCP */
  6110. if (!BP_NOMCP(bp))
  6111. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
  6112. }
  6113. void bnx2x_disable_close_the_gate(struct bnx2x *bp)
  6114. {
  6115. u32 val;
  6116. DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
  6117. if (CHIP_IS_E1(bp)) {
  6118. int port = BP_PORT(bp);
  6119. u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  6120. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  6121. val = REG_RD(bp, addr);
  6122. val &= ~(0x300);
  6123. REG_WR(bp, addr, val);
  6124. } else if (CHIP_IS_E1H(bp)) {
  6125. val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
  6126. val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
  6127. MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
  6128. REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
  6129. }
  6130. }
  6131. /* Close gates #2, #3 and #4: */
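/* Per the register writes below: gate #4 discards doorbells
 * (PXP_REG_HST_DISCARD_DOORBELLS), gate #2 discards host internal
 * writes (PXP_REG_HST_DISCARD_INTERNAL_WRITES), and gate #3 is
 * controlled via bit 0 of HC_REG_CONFIG_0/1 for this port (cleared
 * when closing).
 */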
  6132. static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
  6133. {
  6134. u32 val, addr;
  6135. /* Gates #2 and #4a are closed/opened for "not E1" only */
  6136. if (!CHIP_IS_E1(bp)) {
  6137. /* #4 */
  6138. val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
  6139. REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
  6140. close ? (val | 0x1) : (val & (~(u32)1)));
  6141. /* #2 */
  6142. val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
  6143. REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
  6144. close ? (val | 0x1) : (val & (~(u32)1)));
  6145. }
  6146. /* #3 */
  6147. addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  6148. val = REG_RD(bp, addr);
  6149. REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
  6150. DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
  6151. close ? "closing" : "opening");
  6152. mmiowb();
  6153. }
  6154. #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
  6155. static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
  6156. {
  6157. /* Do some magic... */
  6158. u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
  6159. *magic_val = val & SHARED_MF_CLP_MAGIC;
  6160. MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
  6161. }
  6162. /* Restore the value of the `magic' bit.
  6163. *
6164. * @param bp driver handle.
  6165. * @param magic_val Old value of the `magic' bit.
  6166. */
  6167. static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
  6168. {
  6169. /* Restore the `magic' bit value... */
  6170. u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
  6171. MF_CFG_WR(bp, shared_mf_config.clp_mb,
  6172. (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
  6173. }
  6174. /**
  6175. * Prepares for MCP reset: takes care of CLP configurations.
  6176. *
  6177. * @param bp
  6178. * @param magic_val Old value of 'magic' bit.
  6179. */
  6180. static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
  6181. {
  6182. u32 shmem;
  6183. u32 validity_offset;
  6184. DP(NETIF_MSG_HW, "Starting\n");
  6185. /* Set `magic' bit in order to save MF config */
  6186. if (!CHIP_IS_E1(bp))
  6187. bnx2x_clp_reset_prep(bp, magic_val);
  6188. /* Get shmem offset */
  6189. shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
  6190. validity_offset = offsetof(struct shmem_region, validity_map[0]);
  6191. /* Clear validity map flags */
  6192. if (shmem > 0)
  6193. REG_WR(bp, shmem + validity_offset, 0);
  6194. }
  6195. #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
  6196. #define MCP_ONE_TIMEOUT 100 /* 100 ms */
  6197. /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
  6198. * depending on the HW type.
  6199. *
  6200. * @param bp
  6201. */
  6202. static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
  6203. {
  6204. /* special handling for emulation and FPGA,
  6205. wait 10 times longer */
  6206. if (CHIP_REV_IS_SLOW(bp))
  6207. msleep(MCP_ONE_TIMEOUT*10);
  6208. else
  6209. msleep(MCP_ONE_TIMEOUT);
  6210. }
  6211. static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
  6212. {
  6213. u32 shmem, cnt, validity_offset, val;
  6214. int rc = 0;
  6215. msleep(100);
  6216. /* Get shmem offset */
  6217. shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
  6218. if (shmem == 0) {
  6219. BNX2X_ERR("Shmem 0 return failure\n");
  6220. rc = -ENOTTY;
  6221. goto exit_lbl;
  6222. }
  6223. validity_offset = offsetof(struct shmem_region, validity_map[0]);
  6224. /* Wait for MCP to come up */
  6225. for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
6226. /* TBD: it's best to check the validity map of the last port.
  6227. * currently checks on port 0.
  6228. */
  6229. val = REG_RD(bp, shmem + validity_offset);
  6230. DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
  6231. shmem + validity_offset, val);
  6232. /* check that shared memory is valid. */
  6233. if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
  6234. == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
  6235. break;
  6236. bnx2x_mcp_wait_one(bp);
  6237. }
  6238. DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
  6239. /* Check that shared memory is valid. This indicates that MCP is up. */
  6240. if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
  6241. (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
  6242. BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
  6243. rc = -ENOTTY;
  6244. goto exit_lbl;
  6245. }
  6246. exit_lbl:
  6247. /* Restore the `magic' bit value */
  6248. if (!CHIP_IS_E1(bp))
  6249. bnx2x_clp_reset_done(bp, magic_val);
  6250. return rc;
  6251. }
  6252. static void bnx2x_pxp_prep(struct bnx2x *bp)
  6253. {
  6254. if (!CHIP_IS_E1(bp)) {
  6255. REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
  6256. REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
  6257. REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
  6258. mmiowb();
  6259. }
  6260. }
  6261. /*
  6262. * Reset the whole chip except for:
  6263. * - PCIE core
  6264. * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
  6265. * one reset bit)
  6266. * - IGU
  6267. * - MISC (including AEU)
  6268. * - GRC
  6269. * - RBCN, RBCP
  6270. */
  6271. static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
  6272. {
  6273. u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
  6274. not_reset_mask1 =
  6275. MISC_REGISTERS_RESET_REG_1_RST_HC |
  6276. MISC_REGISTERS_RESET_REG_1_RST_PXPV |
  6277. MISC_REGISTERS_RESET_REG_1_RST_PXP;
  6278. not_reset_mask2 =
  6279. MISC_REGISTERS_RESET_REG_2_RST_MDIO |
  6280. MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
  6281. MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
  6282. MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
  6283. MISC_REGISTERS_RESET_REG_2_RST_RBCN |
  6284. MISC_REGISTERS_RESET_REG_2_RST_GRC |
  6285. MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
  6286. MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
  6287. reset_mask1 = 0xffffffff;
  6288. if (CHIP_IS_E1(bp))
  6289. reset_mask2 = 0xffff;
  6290. else
  6291. reset_mask2 = 0x1ffff;
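/* 0xffff vs. 0x1ffff: presumably E1 implements only 16 bits in
 * RESET_REG_2 while later chips implement 17, so the mask is widened
 * accordingly.
 */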
  6292. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  6293. reset_mask1 & (~not_reset_mask1));
  6294. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
  6295. reset_mask2 & (~not_reset_mask2));
  6296. barrier();
  6297. mmiowb();
  6298. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
  6299. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
  6300. mmiowb();
  6301. }
  6302. static int bnx2x_process_kill(struct bnx2x *bp)
  6303. {
  6304. int cnt = 1000;
  6305. u32 val = 0;
  6306. u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
  6307. /* Empty the Tetris buffer, wait for 1s */
  6308. do {
  6309. sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
  6310. blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
  6311. port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
  6312. port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
  6313. pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
  6314. if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
  6315. ((port_is_idle_0 & 0x1) == 0x1) &&
  6316. ((port_is_idle_1 & 0x1) == 0x1) &&
  6317. (pgl_exp_rom2 == 0xffffffff))
  6318. break;
  6319. msleep(1);
  6320. } while (cnt-- > 0);
  6321. if (cnt <= 0) {
  6322. DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
  6323. " are still"
  6324. " outstanding read requests after 1s!\n");
  6325. DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
  6326. " port_is_idle_0=0x%08x,"
  6327. " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
  6328. sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
  6329. pgl_exp_rom2);
  6330. return -EAGAIN;
  6331. }
  6332. barrier();
  6333. /* Close gates #2, #3 and #4 */
  6334. bnx2x_set_234_gates(bp, true);
  6335. /* TBD: Indicate that "process kill" is in progress to MCP */
  6336. /* Clear "unprepared" bit */
  6337. REG_WR(bp, MISC_REG_UNPREPARED, 0);
  6338. barrier();
  6339. /* Make sure all is written to the chip before the reset */
  6340. mmiowb();
  6341. /* Wait for 1ms to empty GLUE and PCI-E core queues,
  6342. * PSWHST, GRC and PSWRD Tetris buffer.
  6343. */
  6344. msleep(1);
  6345. /* Prepare to chip reset: */
  6346. /* MCP */
  6347. bnx2x_reset_mcp_prep(bp, &val);
  6348. /* PXP */
  6349. bnx2x_pxp_prep(bp);
  6350. barrier();
  6351. /* reset the chip */
  6352. bnx2x_process_kill_chip_reset(bp);
  6353. barrier();
  6354. /* Recover after reset: */
  6355. /* MCP */
  6356. if (bnx2x_reset_mcp_comp(bp, val))
  6357. return -EAGAIN;
  6358. /* PXP */
  6359. bnx2x_pxp_prep(bp);
  6360. /* Open the gates #2, #3 and #4 */
  6361. bnx2x_set_234_gates(bp, false);
  6362. /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
  6363. * reset state, re-enable attentions. */
  6364. return 0;
  6365. }
  6366. static int bnx2x_leader_reset(struct bnx2x *bp)
  6367. {
  6368. int rc = 0;
  6369. /* Try to recover after the failure */
  6370. if (bnx2x_process_kill(bp)) {
6371. printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
  6372. bp->dev->name);
  6373. rc = -EAGAIN;
  6374. goto exit_leader_reset;
  6375. }
  6376. /* Clear "reset is in progress" bit and update the driver state */
  6377. bnx2x_set_reset_done(bp);
  6378. bp->recovery_state = BNX2X_RECOVERY_DONE;
  6379. exit_leader_reset:
  6380. bp->is_leader = 0;
  6381. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
  6382. smp_wmb();
  6383. return rc;
  6384. }
  6385. /* Assumption: runs under rtnl lock. This together with the fact
6386. * that it's called only from bnx2x_reset_task() ensures that it
  6387. * will never be called when netif_running(bp->dev) is false.
  6388. */
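/* Recovery state machine implemented below:
 *   INIT - try to become the leader (LEADER_LOCK), unload the NIC and
 *          move to WAIT.
 *   WAIT - the leader waits for the load counter to drop to zero and
 *          then runs the "process kill" and reloads; a non-leader
 *          either inherits leadership (if the lock became free) or
 *          waits for the leader to finish and then reloads.
 *   DONE - nothing left to do.
 */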
  6389. static void bnx2x_parity_recover(struct bnx2x *bp)
  6390. {
  6391. DP(NETIF_MSG_HW, "Handling parity\n");
  6392. while (1) {
  6393. switch (bp->recovery_state) {
  6394. case BNX2X_RECOVERY_INIT:
  6395. DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
  6396. /* Try to get a LEADER_LOCK HW lock */
  6397. if (bnx2x_trylock_hw_lock(bp,
  6398. HW_LOCK_RESOURCE_RESERVED_08))
  6399. bp->is_leader = 1;
  6400. /* Stop the driver */
  6401. /* If interface has been removed - break */
  6402. if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
  6403. return;
  6404. bp->recovery_state = BNX2X_RECOVERY_WAIT;
  6405. /* Ensure "is_leader" and "recovery_state"
  6406. * update values are seen on other CPUs
  6407. */
  6408. smp_wmb();
  6409. break;
  6410. case BNX2X_RECOVERY_WAIT:
  6411. DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
  6412. if (bp->is_leader) {
  6413. u32 load_counter = bnx2x_get_load_cnt(bp);
  6414. if (load_counter) {
  6415. /* Wait until all other functions get
  6416. * down.
  6417. */
  6418. schedule_delayed_work(&bp->reset_task,
  6419. HZ/10);
  6420. return;
  6421. } else {
  6422. /* If all other functions got down -
  6423. * try to bring the chip back to
  6424. * normal. In any case it's an exit
  6425. * point for a leader.
  6426. */
  6427. if (bnx2x_leader_reset(bp) ||
  6428. bnx2x_nic_load(bp, LOAD_NORMAL)) {
  6429. printk(KERN_ERR"%s: Recovery "
  6430. "has failed. Power cycle is "
  6431. "needed.\n", bp->dev->name);
  6432. /* Disconnect this device */
  6433. netif_device_detach(bp->dev);
6434. /* Block ifup for all functions
  6435. * of this ASIC until
  6436. * "process kill" or power
  6437. * cycle.
  6438. */
  6439. bnx2x_set_reset_in_progress(bp);
  6440. /* Shut down the power */
  6441. bnx2x_set_power_state(bp,
  6442. PCI_D3hot);
  6443. return;
  6444. }
  6445. return;
  6446. }
  6447. } else { /* non-leader */
  6448. if (!bnx2x_reset_is_done(bp)) {
  6449. /* Try to get a LEADER_LOCK HW lock as
  6450. * long as a former leader may have
  6451. * been unloaded by the user or
6452. * released leadership for another
  6453. * reason.
  6454. */
  6455. if (bnx2x_trylock_hw_lock(bp,
  6456. HW_LOCK_RESOURCE_RESERVED_08)) {
  6457. /* I'm a leader now! Restart a
  6458. * switch case.
  6459. */
  6460. bp->is_leader = 1;
  6461. break;
  6462. }
  6463. schedule_delayed_work(&bp->reset_task,
  6464. HZ/10);
  6465. return;
  6466. } else { /* A leader has completed
  6467. * the "process kill". It's an exit
  6468. * point for a non-leader.
  6469. */
  6470. bnx2x_nic_load(bp, LOAD_NORMAL);
  6471. bp->recovery_state =
  6472. BNX2X_RECOVERY_DONE;
  6473. smp_wmb();
  6474. return;
  6475. }
  6476. }
  6477. default:
  6478. return;
  6479. }
  6480. }
  6481. }
6482. /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
6483. * scheduled on a generic workqueue in order to prevent a deadlock.
  6484. */
  6485. static void bnx2x_reset_task(struct work_struct *work)
  6486. {
  6487. struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
  6488. #ifdef BNX2X_STOP_ON_ERROR
  6489. BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
  6490. " so reset not done to allow debug dump,\n"
  6491. KERN_ERR " you will need to reboot when done\n");
  6492. return;
  6493. #endif
  6494. rtnl_lock();
  6495. if (!netif_running(bp->dev))
  6496. goto reset_task_exit;
  6497. if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
  6498. bnx2x_parity_recover(bp);
  6499. else {
  6500. bnx2x_nic_unload(bp, UNLOAD_NORMAL);
  6501. bnx2x_nic_load(bp, LOAD_NORMAL);
  6502. }
  6503. reset_task_exit:
  6504. rtnl_unlock();
  6505. }
  6506. /* end of nic load/unload */
  6507. /*
  6508. * Init service functions
  6509. */
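/* The per-function PGL "pretend" registers are laid out at a fixed
 * stride, so the register for absolute function N is
 * PXP2_REG_PGL_PRETEND_FUNC_F0 + N * (F1 - F0).
 */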
  6510. static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
  6511. {
  6512. u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
  6513. u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
  6514. return base + (BP_ABS_FUNC(bp)) * stride;
  6515. }
  6516. static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
  6517. {
  6518. u32 reg = bnx2x_get_pretend_reg(bp);
  6519. /* Flush all outstanding writes */
  6520. mmiowb();
  6521. /* Pretend to be function 0 */
  6522. REG_WR(bp, reg, 0);
  6523. REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
  6524. /* From now we are in the "like-E1" mode */
  6525. bnx2x_int_disable(bp);
  6526. /* Flush all outstanding writes */
  6527. mmiowb();
  6528. /* Restore the original function */
  6529. REG_WR(bp, reg, BP_ABS_FUNC(bp));
  6530. REG_RD(bp, reg);
  6531. }
  6532. static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
  6533. {
  6534. if (CHIP_IS_E1(bp))
  6535. bnx2x_int_disable(bp);
  6536. else
  6537. bnx2x_undi_int_disable_e1h(bp);
  6538. }
  6539. static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
  6540. {
  6541. u32 val;
  6542. /* Check if there is any driver already loaded */
  6543. val = REG_RD(bp, MISC_REG_UNPREPARED);
  6544. if (val == 0x1) {
  6545. /* Check if it is the UNDI driver
6546. * UNDI driver initializes CID offset for the normal doorbell to 0x7
  6547. */
  6548. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
  6549. val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
  6550. if (val == 0x7) {
  6551. u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
  6552. /* save our pf_num */
  6553. int orig_pf_num = bp->pf_num;
  6554. u32 swap_en;
  6555. u32 swap_val;
  6556. /* clear the UNDI indication */
  6557. REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
  6558. BNX2X_DEV_INFO("UNDI is active! reset device\n");
6559. /* try to unload UNDI on port 0 */
  6560. bp->pf_num = 0;
  6561. bp->fw_seq =
  6562. (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
  6563. DRV_MSG_SEQ_NUMBER_MASK);
  6564. reset_code = bnx2x_fw_command(bp, reset_code, 0);
  6565. /* if UNDI is loaded on the other port */
  6566. if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
  6567. /* send "DONE" for previous unload */
  6568. bnx2x_fw_command(bp,
  6569. DRV_MSG_CODE_UNLOAD_DONE, 0);
  6570. /* unload UNDI on port 1 */
  6571. bp->pf_num = 1;
  6572. bp->fw_seq =
  6573. (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
  6574. DRV_MSG_SEQ_NUMBER_MASK);
  6575. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
  6576. bnx2x_fw_command(bp, reset_code, 0);
  6577. }
  6578. /* now it's safe to release the lock */
  6579. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
  6580. bnx2x_undi_int_disable(bp);
  6581. /* close input traffic and wait for it */
  6582. /* Do not rcv packets to BRB */
  6583. REG_WR(bp,
  6584. (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
  6585. NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
  6586. /* Do not direct rcv packets that are not for MCP to
  6587. * the BRB */
  6588. REG_WR(bp,
  6589. (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
  6590. NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
  6591. /* clear AEU */
  6592. REG_WR(bp,
  6593. (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  6594. MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
  6595. msleep(10);
  6596. /* save NIG port swap info */
  6597. swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
  6598. swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
  6599. /* reset device */
  6600. REG_WR(bp,
  6601. GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  6602. 0xd3ffffff);
  6603. REG_WR(bp,
  6604. GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
  6605. 0x1403);
  6606. /* take the NIG out of reset and restore swap values */
  6607. REG_WR(bp,
  6608. GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
  6609. MISC_REGISTERS_RESET_REG_1_RST_NIG);
  6610. REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
  6611. REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
  6612. /* send unload done to the MCP */
  6613. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
  6614. /* restore our func and fw_seq */
  6615. bp->pf_num = orig_pf_num;
  6616. bp->fw_seq =
  6617. (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
  6618. DRV_MSG_SEQ_NUMBER_MASK);
  6619. } else
  6620. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
  6621. }
  6622. }
  6623. static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
  6624. {
  6625. u32 val, val2, val3, val4, id;
  6626. u16 pmc;
  6627. /* Get the chip revision id and number. */
  6628. /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
  6629. val = REG_RD(bp, MISC_REG_CHIP_NUM);
  6630. id = ((val & 0xffff) << 16);
  6631. val = REG_RD(bp, MISC_REG_CHIP_REV);
  6632. id |= ((val & 0xf) << 12);
  6633. val = REG_RD(bp, MISC_REG_CHIP_METAL);
  6634. id |= ((val & 0xff) << 4);
  6635. val = REG_RD(bp, MISC_REG_BOND_ID);
  6636. id |= (val & 0xf);
  6637. bp->common.chip_id = id;
  6638. /* Set doorbell size */
  6639. bp->db_size = (1 << BNX2X_DB_SHIFT);
  6640. if (CHIP_IS_E2(bp)) {
  6641. val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
  6642. if ((val & 1) == 0)
  6643. val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
  6644. else
  6645. val = (val >> 1) & 1;
  6646. BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
  6647. "2_PORT_MODE");
  6648. bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
  6649. CHIP_2_PORT_MODE;
  6650. if (CHIP_MODE_IS_4_PORT(bp))
  6651. bp->pfid = (bp->pf_num >> 1); /* 0..3 */
  6652. else
  6653. bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
  6654. } else {
  6655. bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
  6656. bp->pfid = bp->pf_num; /* 0..7 */
  6657. }
  6658. /*
  6659. * set base FW non-default (fast path) status block id, this value is
  6660. * used to initialize the fw_sb_id saved on the fp/queue structure to
  6661. * determine the id used by the FW.
  6662. */
  6663. if (CHIP_IS_E1x(bp))
  6664. bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
  6665. else /* E2 */
  6666. bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
  6667. bp->link_params.chip_id = bp->common.chip_id;
  6668. BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
  6669. val = (REG_RD(bp, 0x2874) & 0x55);
  6670. if ((bp->common.chip_id & 0x1) ||
  6671. (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
  6672. bp->flags |= ONE_PORT_FLAG;
  6673. BNX2X_DEV_INFO("single port device\n");
  6674. }
  6675. val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
  6676. bp->common.flash_size = (NVRAM_1MB_SIZE <<
  6677. (val & MCPR_NVM_CFG4_FLASH_SIZE));
  6678. BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
  6679. bp->common.flash_size, bp->common.flash_size);
  6680. bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
  6681. bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
  6682. MISC_REG_GENERIC_CR_1 :
  6683. MISC_REG_GENERIC_CR_0));
  6684. bp->link_params.shmem_base = bp->common.shmem_base;
  6685. bp->link_params.shmem2_base = bp->common.shmem2_base;
  6686. BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
  6687. bp->common.shmem_base, bp->common.shmem2_base);
  6688. if (!bp->common.shmem_base) {
  6689. BNX2X_DEV_INFO("MCP not active\n");
  6690. bp->flags |= NO_MCP_FLAG;
  6691. return;
  6692. }
  6693. val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
  6694. if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
  6695. != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
  6696. BNX2X_ERR("BAD MCP validity signature\n");
  6697. bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
  6698. BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
  6699. bp->link_params.hw_led_mode = ((bp->common.hw_config &
  6700. SHARED_HW_CFG_LED_MODE_MASK) >>
  6701. SHARED_HW_CFG_LED_MODE_SHIFT);
  6702. bp->link_params.feature_config_flags = 0;
  6703. val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
  6704. if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
  6705. bp->link_params.feature_config_flags |=
  6706. FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
  6707. else
  6708. bp->link_params.feature_config_flags &=
  6709. ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
  6710. val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
  6711. bp->common.bc_ver = val;
  6712. BNX2X_DEV_INFO("bc_ver %X\n", val);
  6713. if (val < BNX2X_BC_VER) {
  6714. /* for now only warn
  6715. * later we might need to enforce this */
  6716. BNX2X_ERR("This driver needs bc_ver %X but found %X, "
  6717. "please upgrade BC\n", BNX2X_BC_VER, val);
  6718. }
  6719. bp->link_params.feature_config_flags |=
  6720. (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
  6721. FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
  6722. bp->link_params.feature_config_flags |=
  6723. (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
  6724. FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
  6725. if (BP_E1HVN(bp) == 0) {
  6726. pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
  6727. bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
  6728. } else {
  6729. /* no WOL capability for E1HVN != 0 */
  6730. bp->flags |= NO_WOL_FLAG;
  6731. }
  6732. BNX2X_DEV_INFO("%sWoL capable\n",
  6733. (bp->flags & NO_WOL_FLAG) ? "not " : "");
  6734. val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
  6735. val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
  6736. val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
  6737. val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
  6738. dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
  6739. val, val2, val3, val4);
  6740. }
  6741. #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
  6742. #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
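/* When the IGU is in normal mode the CAM is scanned below: every valid
 * entry maps an IGU status block to a (function, vector) pair.  Vector 0
 * is that function's default SB; the remaining vectors make up its
 * fast-path SB range (igu_base_sb / igu_sb_cnt).
 */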
  6743. static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
  6744. {
  6745. int pfid = BP_FUNC(bp);
  6746. int vn = BP_E1HVN(bp);
  6747. int igu_sb_id;
  6748. u32 val;
  6749. u8 fid;
  6750. bp->igu_base_sb = 0xff;
  6751. bp->igu_sb_cnt = 0;
  6752. if (CHIP_INT_MODE_IS_BC(bp)) {
  6753. bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
  6754. NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
  6755. bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
  6756. FP_SB_MAX_E1x;
  6757. bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
  6758. (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
  6759. return;
  6760. }
  6761. /* IGU in normal mode - read CAM */
  6762. for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
  6763. igu_sb_id++) {
  6764. val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
  6765. if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
  6766. continue;
  6767. fid = IGU_FID(val);
  6768. if ((fid & IGU_FID_ENCODE_IS_PF)) {
  6769. if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
  6770. continue;
  6771. if (IGU_VEC(val) == 0)
  6772. /* default status block */
  6773. bp->igu_dsb_id = igu_sb_id;
  6774. else {
  6775. if (bp->igu_base_sb == 0xff)
  6776. bp->igu_base_sb = igu_sb_id;
  6777. bp->igu_sb_cnt++;
  6778. }
  6779. }
  6780. }
  6781. bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
  6782. NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
  6783. if (bp->igu_sb_cnt == 0)
  6784. BNX2X_ERR("CAM configuration error\n");
  6785. }
  6786. static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
  6787. u32 switch_cfg)
  6788. {
  6789. int cfg_size = 0, idx, port = BP_PORT(bp);
  6790. /* Aggregation of supported attributes of all external phys */
  6791. bp->port.supported[0] = 0;
  6792. bp->port.supported[1] = 0;
  6793. switch (bp->link_params.num_phys) {
  6794. case 1:
  6795. bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
  6796. cfg_size = 1;
  6797. break;
  6798. case 2:
  6799. bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
  6800. cfg_size = 1;
  6801. break;
  6802. case 3:
  6803. if (bp->link_params.multi_phy_config &
  6804. PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
  6805. bp->port.supported[1] =
  6806. bp->link_params.phy[EXT_PHY1].supported;
  6807. bp->port.supported[0] =
  6808. bp->link_params.phy[EXT_PHY2].supported;
  6809. } else {
  6810. bp->port.supported[0] =
  6811. bp->link_params.phy[EXT_PHY1].supported;
  6812. bp->port.supported[1] =
  6813. bp->link_params.phy[EXT_PHY2].supported;
  6814. }
  6815. cfg_size = 2;
  6816. break;
  6817. }
  6818. if (!(bp->port.supported[0] || bp->port.supported[1])) {
  6819. BNX2X_ERR("NVRAM config error. BAD phy config."
  6820. "PHY1 config 0x%x, PHY2 config 0x%x\n",
  6821. SHMEM_RD(bp,
  6822. dev_info.port_hw_config[port].external_phy_config),
  6823. SHMEM_RD(bp,
  6824. dev_info.port_hw_config[port].external_phy_config2));
  6825. return;
  6826. }
  6827. switch (switch_cfg) {
  6828. case SWITCH_CFG_1G:
  6829. bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
  6830. port*0x10);
  6831. BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
  6832. break;
  6833. case SWITCH_CFG_10G:
  6834. bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
  6835. port*0x18);
  6836. BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
  6837. break;
  6838. default:
  6839. BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
  6840. bp->port.link_config[0]);
  6841. return;
  6842. }
  6843. /* mask what we support according to speed_cap_mask per configuration */
  6844. for (idx = 0; idx < cfg_size; idx++) {
  6845. if (!(bp->link_params.speed_cap_mask[idx] &
  6846. PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
  6847. bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
  6848. if (!(bp->link_params.speed_cap_mask[idx] &
  6849. PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
  6850. bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
  6851. if (!(bp->link_params.speed_cap_mask[idx] &
  6852. PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
  6853. bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
  6854. if (!(bp->link_params.speed_cap_mask[idx] &
  6855. PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
  6856. bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
  6857. if (!(bp->link_params.speed_cap_mask[idx] &
  6858. PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
  6859. bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
  6860. SUPPORTED_1000baseT_Full);
  6861. if (!(bp->link_params.speed_cap_mask[idx] &
  6862. PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
  6863. bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
  6864. if (!(bp->link_params.speed_cap_mask[idx] &
  6865. PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
  6866. bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
  6867. }
  6868. BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
  6869. bp->port.supported[1]);
  6870. }
  6871. static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
  6872. {
  6873. u32 link_config, idx, cfg_size = 0;
  6874. bp->port.advertising[0] = 0;
  6875. bp->port.advertising[1] = 0;
  6876. switch (bp->link_params.num_phys) {
  6877. case 1:
  6878. case 2:
  6879. cfg_size = 1;
  6880. break;
  6881. case 3:
  6882. cfg_size = 2;
  6883. break;
  6884. }
  6885. for (idx = 0; idx < cfg_size; idx++) {
  6886. bp->link_params.req_duplex[idx] = DUPLEX_FULL;
  6887. link_config = bp->port.link_config[idx];
  6888. switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
  6889. case PORT_FEATURE_LINK_SPEED_AUTO:
  6890. if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
  6891. bp->link_params.req_line_speed[idx] =
  6892. SPEED_AUTO_NEG;
  6893. bp->port.advertising[idx] |=
  6894. bp->port.supported[idx];
  6895. } else {
  6896. /* force 10G, no AN */
  6897. bp->link_params.req_line_speed[idx] =
  6898. SPEED_10000;
  6899. bp->port.advertising[idx] |=
  6900. (ADVERTISED_10000baseT_Full |
  6901. ADVERTISED_FIBRE);
  6902. continue;
  6903. }
  6904. break;
  6905. case PORT_FEATURE_LINK_SPEED_10M_FULL:
  6906. if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
  6907. bp->link_params.req_line_speed[idx] =
  6908. SPEED_10;
  6909. bp->port.advertising[idx] |=
  6910. (ADVERTISED_10baseT_Full |
  6911. ADVERTISED_TP);
  6912. } else {
  6913. BNX2X_ERROR("NVRAM config error. "
  6914. "Invalid link_config 0x%x"
  6915. " speed_cap_mask 0x%x\n",
  6916. link_config,
  6917. bp->link_params.speed_cap_mask[idx]);
  6918. return;
  6919. }
  6920. break;
  6921. case PORT_FEATURE_LINK_SPEED_10M_HALF:
  6922. if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
  6923. bp->link_params.req_line_speed[idx] =
  6924. SPEED_10;
  6925. bp->link_params.req_duplex[idx] =
  6926. DUPLEX_HALF;
  6927. bp->port.advertising[idx] |=
  6928. (ADVERTISED_10baseT_Half |
  6929. ADVERTISED_TP);
  6930. } else {
  6931. BNX2X_ERROR("NVRAM config error. "
  6932. "Invalid link_config 0x%x"
  6933. " speed_cap_mask 0x%x\n",
  6934. link_config,
  6935. bp->link_params.speed_cap_mask[idx]);
  6936. return;
  6937. }
  6938. break;
  6939. case PORT_FEATURE_LINK_SPEED_100M_FULL:
  6940. if (bp->port.supported[idx] &
  6941. SUPPORTED_100baseT_Full) {
  6942. bp->link_params.req_line_speed[idx] =
  6943. SPEED_100;
  6944. bp->port.advertising[idx] |=
  6945. (ADVERTISED_100baseT_Full |
  6946. ADVERTISED_TP);
  6947. } else {
  6948. BNX2X_ERROR("NVRAM config error. "
  6949. "Invalid link_config 0x%x"
  6950. " speed_cap_mask 0x%x\n",
  6951. link_config,
  6952. bp->link_params.speed_cap_mask[idx]);
  6953. return;
  6954. }
  6955. break;
  6956. case PORT_FEATURE_LINK_SPEED_100M_HALF:
  6957. if (bp->port.supported[idx] &
  6958. SUPPORTED_100baseT_Half) {
  6959. bp->link_params.req_line_speed[idx] =
  6960. SPEED_100;
  6961. bp->link_params.req_duplex[idx] =
  6962. DUPLEX_HALF;
  6963. bp->port.advertising[idx] |=
  6964. (ADVERTISED_100baseT_Half |
  6965. ADVERTISED_TP);
  6966. } else {
  6967. BNX2X_ERROR("NVRAM config error. "
  6968. "Invalid link_config 0x%x"
  6969. " speed_cap_mask 0x%x\n",
  6970. link_config,
  6971. bp->link_params.speed_cap_mask[idx]);
  6972. return;
  6973. }
  6974. break;
  6975. case PORT_FEATURE_LINK_SPEED_1G:
  6976. if (bp->port.supported[idx] &
  6977. SUPPORTED_1000baseT_Full) {
  6978. bp->link_params.req_line_speed[idx] =
  6979. SPEED_1000;
  6980. bp->port.advertising[idx] |=
  6981. (ADVERTISED_1000baseT_Full |
  6982. ADVERTISED_TP);
  6983. } else {
  6984. BNX2X_ERROR("NVRAM config error. "
  6985. "Invalid link_config 0x%x"
  6986. " speed_cap_mask 0x%x\n",
  6987. link_config,
  6988. bp->link_params.speed_cap_mask[idx]);
  6989. return;
  6990. }
  6991. break;
  6992. case PORT_FEATURE_LINK_SPEED_2_5G:
  6993. if (bp->port.supported[idx] &
  6994. SUPPORTED_2500baseX_Full) {
  6995. bp->link_params.req_line_speed[idx] =
  6996. SPEED_2500;
  6997. bp->port.advertising[idx] |=
  6998. (ADVERTISED_2500baseX_Full |
  6999. ADVERTISED_TP);
  7000. } else {
  7001. BNX2X_ERROR("NVRAM config error. "
  7002. "Invalid link_config 0x%x"
  7003. " speed_cap_mask 0x%x\n",
  7004. link_config,
  7005. bp->link_params.speed_cap_mask[idx]);
  7006. return;
  7007. }
  7008. break;
  7009. case PORT_FEATURE_LINK_SPEED_10G_CX4:
  7010. case PORT_FEATURE_LINK_SPEED_10G_KX4:
  7011. case PORT_FEATURE_LINK_SPEED_10G_KR:
  7012. if (bp->port.supported[idx] &
  7013. SUPPORTED_10000baseT_Full) {
  7014. bp->link_params.req_line_speed[idx] =
  7015. SPEED_10000;
  7016. bp->port.advertising[idx] |=
  7017. (ADVERTISED_10000baseT_Full |
  7018. ADVERTISED_FIBRE);
  7019. } else {
  7020. BNX2X_ERROR("NVRAM config error. "
  7021. "Invalid link_config 0x%x"
  7022. " speed_cap_mask 0x%x\n",
  7023. link_config,
  7024. bp->link_params.speed_cap_mask[idx]);
  7025. return;
  7026. }
  7027. break;
  7028. default:
  7029. BNX2X_ERROR("NVRAM config error. "
  7030. "BAD link speed link_config 0x%x\n",
  7031. link_config);
  7032. bp->link_params.req_line_speed[idx] =
  7033. SPEED_AUTO_NEG;
  7034. bp->port.advertising[idx] =
  7035. bp->port.supported[idx];
  7036. break;
  7037. }
  7038. bp->link_params.req_flow_ctrl[idx] = (link_config &
  7039. PORT_FEATURE_FLOW_CONTROL_MASK);
  7040. if ((bp->link_params.req_flow_ctrl[idx] ==
  7041. BNX2X_FLOW_CTRL_AUTO) &&
  7042. !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
  7043. bp->link_params.req_flow_ctrl[idx] =
  7044. BNX2X_FLOW_CTRL_NONE;
  7045. }
  7046. BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
  7047. " 0x%x advertising 0x%x\n",
  7048. bp->link_params.req_line_speed[idx],
  7049. bp->link_params.req_duplex[idx],
  7050. bp->link_params.req_flow_ctrl[idx],
  7051. bp->port.advertising[idx]);
  7052. }
  7053. }
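/* Compose a 6-byte MAC address from the 16-bit "upper" and 32-bit
 * "lower" words read from shmem: both are converted to big-endian so
 * the bytes land in network order (mac_hi -> buf[0..1],
 * mac_lo -> buf[2..5]).
 */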
  7054. static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
  7055. {
  7056. mac_hi = cpu_to_be16(mac_hi);
  7057. mac_lo = cpu_to_be32(mac_lo);
  7058. memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
  7059. memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
  7060. }
  7061. static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
  7062. {
  7063. int port = BP_PORT(bp);
  7064. u32 config;
  7065. u32 ext_phy_type, ext_phy_config;
  7066. bp->link_params.bp = bp;
  7067. bp->link_params.port = port;
  7068. bp->link_params.lane_config =
  7069. SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
  7070. bp->link_params.speed_cap_mask[0] =
  7071. SHMEM_RD(bp,
  7072. dev_info.port_hw_config[port].speed_capability_mask);
  7073. bp->link_params.speed_cap_mask[1] =
  7074. SHMEM_RD(bp,
  7075. dev_info.port_hw_config[port].speed_capability_mask2);
  7076. bp->port.link_config[0] =
  7077. SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
  7078. bp->port.link_config[1] =
  7079. SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
  7080. bp->link_params.multi_phy_config =
  7081. SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
  7082. /* If the device is capable of WoL, set the default state according
  7083. * to the HW
  7084. */
  7085. config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
  7086. bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
  7087. (config & PORT_FEATURE_WOL_ENABLED));
  7088. BNX2X_DEV_INFO("lane_config 0x%08x "
  7089. "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
  7090. bp->link_params.lane_config,
  7091. bp->link_params.speed_cap_mask[0],
  7092. bp->port.link_config[0]);
  7093. bp->link_params.switch_cfg = (bp->port.link_config[0] &
  7094. PORT_FEATURE_CONNECTED_SWITCH_MASK);
  7095. bnx2x_phy_probe(&bp->link_params);
  7096. bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
  7097. bnx2x_link_settings_requested(bp);
  7098. /*
  7099. * If connected directly, work with the internal PHY, otherwise, work
  7100. * with the external PHY
  7101. */
  7102. ext_phy_config =
  7103. SHMEM_RD(bp,
  7104. dev_info.port_hw_config[port].external_phy_config);
  7105. ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
  7106. if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
  7107. bp->mdio.prtad = bp->port.phy_addr;
  7108. else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
  7109. (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
  7110. bp->mdio.prtad =
  7111. XGXS_EXT_PHY_ADDR(ext_phy_config);
  7112. /*
7113. * Check if a HW lock is required to access the MDC/MDIO bus to the PHY(s).
7114. * In MF mode, it is set to cover self-test cases.
  7115. */
  7116. if (IS_MF(bp))
  7117. bp->port.need_hw_lock = 1;
  7118. else
  7119. bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
  7120. bp->common.shmem_base,
  7121. bp->common.shmem2_base);
  7122. }
  7123. #ifdef BCM_CNIC
  7124. static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
  7125. {
  7126. u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
  7127. drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
  7128. u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
  7129. drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
7130. /* Get the maximum allowed number of iSCSI and FCoE connections */
  7131. bp->cnic_eth_dev.max_iscsi_conn =
  7132. (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
  7133. BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
  7134. bp->cnic_eth_dev.max_fcoe_conn =
  7135. (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
  7136. BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
  7137. BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
  7138. bp->cnic_eth_dev.max_iscsi_conn,
  7139. bp->cnic_eth_dev.max_fcoe_conn);
7140. /* If the maximum allowed number of connections is zero -
  7141. * disable the feature.
  7142. */
  7143. if (!bp->cnic_eth_dev.max_iscsi_conn)
  7144. bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
  7145. if (!bp->cnic_eth_dev.max_fcoe_conn)
  7146. bp->flags |= NO_FCOE_FLAG;
  7147. }
  7148. #endif
  7149. static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
  7150. {
  7151. u32 val, val2;
  7152. int func = BP_ABS_FUNC(bp);
  7153. int port = BP_PORT(bp);
  7154. #ifdef BCM_CNIC
  7155. u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
  7156. u8 *fip_mac = bp->fip_mac;
  7157. #endif
  7158. if (BP_NOMCP(bp)) {
  7159. BNX2X_ERROR("warning: random MAC workaround active\n");
  7160. random_ether_addr(bp->dev->dev_addr);
  7161. } else if (IS_MF(bp)) {
  7162. val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
  7163. val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
  7164. if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
  7165. (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
  7166. bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
  7167. #ifdef BCM_CNIC
7168. /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
7169. * FCoE MAC is missing then the appropriate feature should be disabled.
  7170. */
  7171. if (IS_MF_SI(bp)) {
  7172. u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
  7173. if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
  7174. val2 = MF_CFG_RD(bp, func_ext_config[func].
  7175. iscsi_mac_addr_upper);
  7176. val = MF_CFG_RD(bp, func_ext_config[func].
  7177. iscsi_mac_addr_lower);
  7178. BNX2X_DEV_INFO("Read iSCSI MAC: "
  7179. "0x%x:0x%04x\n", val2, val);
  7180. bnx2x_set_mac_buf(iscsi_mac, val, val2);
  7181. /* Disable iSCSI OOO if MAC configuration is
  7182. * invalid.
  7183. */
  7184. if (!is_valid_ether_addr(iscsi_mac)) {
  7185. bp->flags |= NO_ISCSI_OOO_FLAG |
  7186. NO_ISCSI_FLAG;
  7187. memset(iscsi_mac, 0, ETH_ALEN);
  7188. }
  7189. } else
  7190. bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
  7191. if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
  7192. val2 = MF_CFG_RD(bp, func_ext_config[func].
  7193. fcoe_mac_addr_upper);
  7194. val = MF_CFG_RD(bp, func_ext_config[func].
  7195. fcoe_mac_addr_lower);
  7196. BNX2X_DEV_INFO("Read FCoE MAC to "
  7197. "0x%x:0x%04x\n", val2, val);
  7198. bnx2x_set_mac_buf(fip_mac, val, val2);
  7199. /* Disable FCoE if MAC configuration is
  7200. * invalid.
  7201. */
  7202. if (!is_valid_ether_addr(fip_mac)) {
  7203. bp->flags |= NO_FCOE_FLAG;
  7204. memset(bp->fip_mac, 0, ETH_ALEN);
  7205. }
  7206. } else
  7207. bp->flags |= NO_FCOE_FLAG;
  7208. }
  7209. #endif
  7210. } else {
  7211. /* in SF read MACs from port configuration */
  7212. val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
  7213. val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
  7214. bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
  7215. #ifdef BCM_CNIC
  7216. val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
  7217. iscsi_mac_upper);
  7218. val = SHMEM_RD(bp, dev_info.port_hw_config[port].
  7219. iscsi_mac_lower);
  7220. bnx2x_set_mac_buf(iscsi_mac, val, val2);
  7221. #endif
  7222. }
  7223. memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
  7224. memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
  7225. #ifdef BCM_CNIC
7226. /* Set the FCoE MAC in modes other than MF_SI */
  7227. if (!CHIP_IS_E1x(bp)) {
  7228. if (IS_MF_SD(bp))
  7229. memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
  7230. else if (!IS_MF(bp))
  7231. memcpy(fip_mac, iscsi_mac, ETH_ALEN);
  7232. }
  7233. #endif
  7234. }
  7235. static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
  7236. {
  7237. int /*abs*/func = BP_ABS_FUNC(bp);
  7238. int vn, port;
  7239. u32 val = 0;
  7240. int rc = 0;
  7241. bnx2x_get_common_hwinfo(bp);
  7242. if (CHIP_IS_E1x(bp)) {
  7243. bp->common.int_block = INT_BLOCK_HC;
  7244. bp->igu_dsb_id = DEF_SB_IGU_ID;
  7245. bp->igu_base_sb = 0;
  7246. bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
  7247. NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
  7248. } else {
  7249. bp->common.int_block = INT_BLOCK_IGU;
  7250. val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
  7251. if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
  7252. DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
  7253. bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
  7254. } else
  7255. DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
  7256. bnx2x_get_igu_cam_info(bp);
  7257. }
  7258. DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
  7259. bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
  7260. /*
  7261. * Initialize MF configuration
  7262. */
  7263. bp->mf_ov = 0;
  7264. bp->mf_mode = 0;
  7265. vn = BP_E1HVN(bp);
  7266. port = BP_PORT(bp);
  7267. if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
  7268. DP(NETIF_MSG_PROBE,
  7269. "shmem2base 0x%x, size %d, mfcfg offset %d\n",
  7270. bp->common.shmem2_base, SHMEM2_RD(bp, size),
  7271. (u32)offsetof(struct shmem2_region, mf_cfg_addr));
  7272. if (SHMEM2_HAS(bp, mf_cfg_addr))
  7273. bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
  7274. else
  7275. bp->common.mf_cfg_base = bp->common.shmem_base +
  7276. offsetof(struct shmem_region, func_mb) +
  7277. E1H_FUNC_MAX * sizeof(struct drv_func_mb);
  7278. /*
  7279. * get mf configuration:
7280. * 1. existence of MF configuration
  7281. * 2. MAC address must be legal (check only upper bytes)
  7282. * for Switch-Independent mode;
  7283. * OVLAN must be legal for Switch-Dependent mode
  7284. * 3. SF_MODE configures specific MF mode
  7285. */
  7286. if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
  7287. /* get mf configuration */
  7288. val = SHMEM_RD(bp,
  7289. dev_info.shared_feature_config.config);
  7290. val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
  7291. switch (val) {
  7292. case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
  7293. val = MF_CFG_RD(bp, func_mf_config[func].
  7294. mac_upper);
  7295. /* check for legal mac (upper bytes)*/
  7296. if (val != 0xffff) {
  7297. bp->mf_mode = MULTI_FUNCTION_SI;
  7298. bp->mf_config[vn] = MF_CFG_RD(bp,
  7299. func_mf_config[func].config);
  7300. } else
  7301. DP(NETIF_MSG_PROBE, "illegal MAC "
  7302. "address for SI\n");
  7303. break;
  7304. case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
  7305. /* get OV configuration */
  7306. val = MF_CFG_RD(bp,
  7307. func_mf_config[FUNC_0].e1hov_tag);
  7308. val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
  7309. if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
  7310. bp->mf_mode = MULTI_FUNCTION_SD;
  7311. bp->mf_config[vn] = MF_CFG_RD(bp,
  7312. func_mf_config[func].config);
  7313. } else
  7314. DP(NETIF_MSG_PROBE, "illegal OV for "
  7315. "SD\n");
  7316. break;
  7317. default:
  7318. /* Unknown configuration: reset mf_config */
  7319. bp->mf_config[vn] = 0;
  7320. DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
  7321. val);
  7322. }
  7323. }
  7324. BNX2X_DEV_INFO("%s function mode\n",
  7325. IS_MF(bp) ? "multi" : "single");
  7326. switch (bp->mf_mode) {
  7327. case MULTI_FUNCTION_SD:
  7328. val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
  7329. FUNC_MF_CFG_E1HOV_TAG_MASK;
  7330. if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
  7331. bp->mf_ov = val;
  7332. BNX2X_DEV_INFO("MF OV for func %d is %d"
  7333. " (0x%04x)\n", func,
  7334. bp->mf_ov, bp->mf_ov);
  7335. } else {
  7336. BNX2X_ERR("No valid MF OV for func %d,"
  7337. " aborting\n", func);
  7338. rc = -EPERM;
  7339. }
  7340. break;
  7341. case MULTI_FUNCTION_SI:
  7342. BNX2X_DEV_INFO("func %d is in MF "
  7343. "switch-independent mode\n", func);
  7344. break;
  7345. default:
  7346. if (vn) {
  7347. BNX2X_ERR("VN %d in single function mode,"
  7348. " aborting\n", vn);
  7349. rc = -EPERM;
  7350. }
  7351. break;
  7352. }
  7353. }
  7354. /* adjust igu_sb_cnt to MF for E1x */
  7355. if (CHIP_IS_E1x(bp) && IS_MF(bp))
  7356. bp->igu_sb_cnt /= E1HVN_MAX;
  7357. /*
7358. * adjust E2 sb count: to be removed when the FW supports
7359. * more than 16 L2 clients
  7360. */
  7361. #define MAX_L2_CLIENTS 16
  7362. if (CHIP_IS_E2(bp))
  7363. bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
  7364. MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
  7365. if (!BP_NOMCP(bp)) {
  7366. bnx2x_get_port_hwinfo(bp);
  7367. bp->fw_seq =
  7368. (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
  7369. DRV_MSG_SEQ_NUMBER_MASK);
  7370. BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
  7371. }
  7372. /* Get MAC addresses */
  7373. bnx2x_get_mac_hwinfo(bp);
  7374. #ifdef BCM_CNIC
  7375. bnx2x_get_cnic_info(bp);
  7376. #endif
  7377. return rc;
  7378. }
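/* Parse the read-only section of the PCI VPD: if the manufacturer ID
 * matches the Dell vendor ID, the vendor-specific (V0) keyword is
 * copied into bp->fw_ver for display.
 */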
  7379. static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
  7380. {
  7381. int cnt, i, block_end, rodi;
  7382. char vpd_data[BNX2X_VPD_LEN+1];
  7383. char str_id_reg[VENDOR_ID_LEN+1];
  7384. char str_id_cap[VENDOR_ID_LEN+1];
  7385. u8 len;
  7386. cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
  7387. memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
  7388. if (cnt < BNX2X_VPD_LEN)
  7389. goto out_not_found;
  7390. i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
  7391. PCI_VPD_LRDT_RO_DATA);
  7392. if (i < 0)
  7393. goto out_not_found;
  7394. block_end = i + PCI_VPD_LRDT_TAG_SIZE +
  7395. pci_vpd_lrdt_size(&vpd_data[i]);
  7396. i += PCI_VPD_LRDT_TAG_SIZE;
  7397. if (block_end > BNX2X_VPD_LEN)
  7398. goto out_not_found;
  7399. rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
  7400. PCI_VPD_RO_KEYWORD_MFR_ID);
  7401. if (rodi < 0)
  7402. goto out_not_found;
  7403. len = pci_vpd_info_field_size(&vpd_data[rodi]);
  7404. if (len != VENDOR_ID_LEN)
  7405. goto out_not_found;
  7406. rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
  7407. /* vendor specific info */
  7408. snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
  7409. snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
  7410. if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
  7411. !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
  7412. rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
  7413. PCI_VPD_RO_KEYWORD_VENDOR0);
  7414. if (rodi >= 0) {
  7415. len = pci_vpd_info_field_size(&vpd_data[rodi]);
  7416. rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
  7417. if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
  7418. memcpy(bp->fw_ver, &vpd_data[rodi], len);
  7419. bp->fw_ver[len] = ' ';
  7420. }
  7421. }
  7422. return;
  7423. }
  7424. out_not_found:
  7425. return;
  7426. }
  7427. static int __devinit bnx2x_init_bp(struct bnx2x *bp)
  7428. {
  7429. int func;
  7430. int timer_interval;
  7431. int rc;
  7432. /* Disable interrupt handling until HW is initialized */
  7433. atomic_set(&bp->intr_sem, 1);
  7434. smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
  7435. mutex_init(&bp->port.phy_mutex);
  7436. mutex_init(&bp->fw_mb_mutex);
  7437. spin_lock_init(&bp->stats_lock);
  7438. #ifdef BCM_CNIC
  7439. mutex_init(&bp->cnic_mutex);
  7440. #endif
  7441. INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
  7442. INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
  7443. rc = bnx2x_get_hwinfo(bp);
  7444. if (!rc)
  7445. rc = bnx2x_alloc_mem_bp(bp);
  7446. bnx2x_read_fwinfo(bp);
  7447. func = BP_FUNC(bp);
  7448. /* need to reset chip if undi was active */
  7449. if (!BP_NOMCP(bp))
  7450. bnx2x_undi_unload(bp);
  7451. if (CHIP_REV_IS_FPGA(bp))
  7452. dev_err(&bp->pdev->dev, "FPGA detected\n");
  7453. if (BP_NOMCP(bp) && (func == 0))
  7454. dev_err(&bp->pdev->dev, "MCP disabled, "
  7455. "must load devices in order!\n");
  7456. bp->multi_mode = multi_mode;
  7457. bp->int_mode = int_mode;
  7458. bp->dev->features |= NETIF_F_GRO;
  7459. /* Set TPA flags */
  7460. if (disable_tpa) {
  7461. bp->flags &= ~TPA_ENABLE_FLAG;
  7462. bp->dev->features &= ~NETIF_F_LRO;
  7463. } else {
  7464. bp->flags |= TPA_ENABLE_FLAG;
  7465. bp->dev->features |= NETIF_F_LRO;
  7466. }
  7467. bp->disable_tpa = disable_tpa;
  7468. if (CHIP_IS_E1(bp))
  7469. bp->dropless_fc = 0;
  7470. else
  7471. bp->dropless_fc = dropless_fc;
  7472. bp->mrrs = mrrs;
  7473. bp->tx_ring_size = MAX_TX_AVAIL;
  7474. bp->rx_csum = 1;
  7475. /* make sure that the numbers are in the right granularity */
  7476. bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
  7477. bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
  7478. timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
  7479. bp->current_interval = (poll ? poll : timer_interval);
  7480. init_timer(&bp->timer);
  7481. bp->timer.expires = jiffies + bp->current_interval;
  7482. bp->timer.data = (unsigned long) bp;
  7483. bp->timer.function = bnx2x_timer;
  7484. bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
  7485. bnx2x_dcbx_init_params(bp);
  7486. return rc;
  7487. }
  7488. /****************************************************************************
  7489. * General service functions
  7490. ****************************************************************************/
  7491. /* called with rtnl_lock */
  7492. static int bnx2x_open(struct net_device *dev)
  7493. {
  7494. struct bnx2x *bp = netdev_priv(dev);
  7495. netif_carrier_off(dev);
  7496. bnx2x_set_power_state(bp, PCI_D0);
  7497. if (!bnx2x_reset_is_done(bp)) {
  7498. do {
7499. /* Reset the MCP mailbox sequence if there is an ongoing
  7500. * recovery
  7501. */
  7502. bp->fw_seq = 0;
7503. /* If it's the first function to load and "reset done" is
7504. * still not cleared, it may mean that a "process kill" is
7505. * still required. We don't check the attention state here
7506. * because it may have already been cleared by a "common"
7507. * reset, but we shall proceed with "process kill" anyway.
  7508. */
  7509. if ((bnx2x_get_load_cnt(bp) == 0) &&
  7510. bnx2x_trylock_hw_lock(bp,
  7511. HW_LOCK_RESOURCE_RESERVED_08) &&
  7512. (!bnx2x_leader_reset(bp))) {
  7513. DP(NETIF_MSG_HW, "Recovered in open\n");
  7514. break;
  7515. }
  7516. bnx2x_set_power_state(bp, PCI_D3hot);
  7517. printk(KERN_ERR"%s: Recovery flow hasn't been properly"
  7518. " completed yet. Try again later. If u still see this"
  7519. " message after a few retries then power cycle is"
  7520. " required.\n", bp->dev->name);
  7521. return -EAGAIN;
  7522. } while (0);
  7523. }
  7524. bp->recovery_state = BNX2X_RECOVERY_DONE;
  7525. return bnx2x_nic_load(bp, LOAD_OPEN);
  7526. }
  7527. /* called with rtnl_lock */
  7528. static int bnx2x_close(struct net_device *dev)
  7529. {
  7530. struct bnx2x *bp = netdev_priv(dev);
  7531. /* Unload the driver, release IRQs */
  7532. bnx2x_nic_unload(bp, UNLOAD_CLOSE);
  7533. bnx2x_set_power_state(bp, PCI_D3hot);
  7534. return 0;
  7535. }
  7536. #define E1_MAX_UC_LIST 29
  7537. #define E1H_MAX_UC_LIST 30
  7538. #define E2_MAX_UC_LIST 14
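/* These limits follow from the CAM layouts described in
 * bnx2x_uc_list_cam_offset() below: E1 leaves entries 3-31 per port
 * (29), E1H shares entries 16-255 among 8 functions (30 each) and E2
 * shares entries 16-71 among 4 functions (14 each).
 */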
  7539. static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
  7540. {
  7541. if (CHIP_IS_E1(bp))
  7542. return E1_MAX_UC_LIST;
  7543. else if (CHIP_IS_E1H(bp))
  7544. return E1H_MAX_UC_LIST;
  7545. else
  7546. return E2_MAX_UC_LIST;
  7547. }
  7548. static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
  7549. {
  7550. if (CHIP_IS_E1(bp))
  7551. /* CAM Entries for Port0:
  7552. * 0 - prim ETH MAC
  7553. * 1 - BCAST MAC
  7554. * 2 - iSCSI L2 ring ETH MAC
  7555. * 3-31 - UC MACs
  7556. *
  7557. * Port1 entries are allocated the same way starting from
  7558. * entry 32.
  7559. */
  7560. return 3 + 32 * BP_PORT(bp);
  7561. else if (CHIP_IS_E1H(bp)) {
  7562. /* CAM Entries:
  7563. * 0-7 - prim ETH MAC for each function
  7564. * 8-15 - iSCSI L2 ring ETH MAC for each function
7565. * 16-255 - UC MAC lists for each function
  7566. *
  7567. * Remark: There is no FCoE support for E1H, thus FCoE related
  7568. * MACs are not considered.
  7569. */
  7570. return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
  7571. bnx2x_max_uc_list(bp) * BP_FUNC(bp);
  7572. } else {
  7573. /* CAM Entries (there is a separate CAM per engine):
7574. * 0-3 - prim ETH MAC for each function
  7575. * 4-7 - iSCSI L2 ring ETH MAC for each function
  7576. * 8-11 - FIP ucast L2 MAC for each function
  7577. * 12-15 - ALL_ENODE_MACS mcast MAC for each function
7578. * 16-71 - UC MAC lists for each function
  7579. */
  7580. u8 func_idx =
  7581. (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
  7582. return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
  7583. bnx2x_max_uc_list(bp) * func_idx;
  7584. }
  7585. }
  7586. /* set uc list, do not wait as wait implies sleep and
  7587. * set_rx_mode can be invoked from non-sleepable context.
  7588. *
  7589. * Instead we use the same ramrod data buffer each time we need
  7590. * to configure a list of addresses, and use the fact that the
  7591. * list of MACs is changed in an incremental way and that the
  7592. * function is called under the netif_addr_lock. A temporary
  7593. * inconsistent CAM configuration (possible in case of very fast
  7594. * sequence of add/del/add on the host side) will shortly be
  7595. * restored by the handler of the last ramrod.
  7596. */
  7597. static int bnx2x_set_uc_list(struct bnx2x *bp)
  7598. {
  7599. int i = 0, old;
  7600. struct net_device *dev = bp->dev;
  7601. u8 offset = bnx2x_uc_list_cam_offset(bp);
  7602. struct netdev_hw_addr *ha;
  7603. struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
  7604. dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
  7605. if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
  7606. return -EINVAL;
  7607. netdev_for_each_uc_addr(ha, dev) {
  7608. /* copy mac */
  7609. config_cmd->config_table[i].msb_mac_addr =
  7610. swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
  7611. config_cmd->config_table[i].middle_mac_addr =
  7612. swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
  7613. config_cmd->config_table[i].lsb_mac_addr =
  7614. swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
  7615. config_cmd->config_table[i].vlan_id = 0;
  7616. config_cmd->config_table[i].pf_id = BP_FUNC(bp);
  7617. config_cmd->config_table[i].clients_bit_vector =
  7618. cpu_to_le32(1 << BP_L_ID(bp));
  7619. SET_FLAG(config_cmd->config_table[i].flags,
  7620. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  7621. T_ETH_MAC_COMMAND_SET);
  7622. DP(NETIF_MSG_IFUP,
  7623. "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
  7624. config_cmd->config_table[i].msb_mac_addr,
  7625. config_cmd->config_table[i].middle_mac_addr,
  7626. config_cmd->config_table[i].lsb_mac_addr);
  7627. i++;
  7628. /* Set uc MAC in NIG */
  7629. bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
  7630. LLH_CAM_ETH_LINE + i);
  7631. }
  7632. old = config_cmd->hdr.length;
  7633. if (old > i) {
  7634. for (; i < old; i++) {
  7635. if (CAM_IS_INVALID(config_cmd->
  7636. config_table[i])) {
  7637. /* already invalidated */
  7638. break;
  7639. }
  7640. /* invalidate */
  7641. SET_FLAG(config_cmd->config_table[i].flags,
  7642. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  7643. T_ETH_MAC_COMMAND_INVALIDATE);
  7644. }
  7645. }
  7646. wmb();
  7647. config_cmd->hdr.length = i;
  7648. config_cmd->hdr.offset = offset;
  7649. config_cmd->hdr.client_id = 0xff;
  7650. /* Mark that this ramrod doesn't use bp->set_mac_pending for
  7651. * synchronization.
  7652. */
  7653. config_cmd->hdr.echo = 0;
  7654. mb();
  7655. return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  7656. U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  7657. }
  7658. void bnx2x_invalidate_uc_list(struct bnx2x *bp)
  7659. {
  7660. int i;
  7661. struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
  7662. dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
  7663. int ramrod_flags = WAIT_RAMROD_COMMON;
  7664. u8 offset = bnx2x_uc_list_cam_offset(bp);
  7665. u8 max_list_size = bnx2x_max_uc_list(bp);
  7666. for (i = 0; i < max_list_size; i++) {
  7667. SET_FLAG(config_cmd->config_table[i].flags,
  7668. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  7669. T_ETH_MAC_COMMAND_INVALIDATE);
  7670. bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
  7671. }
  7672. wmb();
  7673. config_cmd->hdr.length = max_list_size;
  7674. config_cmd->hdr.offset = offset;
  7675. config_cmd->hdr.client_id = 0xff;
  7676. /* We'll wait for a completion this time... */
  7677. config_cmd->hdr.echo = 1;
  7678. bp->set_mac_pending = 1;
  7679. mb();
  7680. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  7681. U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  7682. /* Wait for a completion */
  7683. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
  7684. ramrod_flags);
  7685. }
  7686. static inline int bnx2x_set_mc_list(struct bnx2x *bp)
  7687. {
  7688. /* some multicasts */
  7689. if (CHIP_IS_E1(bp)) {
  7690. return bnx2x_set_e1_mc_list(bp);
  7691. } else { /* E1H and newer */
  7692. return bnx2x_set_e1h_mc_list(bp);
  7693. }
  7694. }
  7695. /* called with netif_tx_lock from dev_mcast.c */
  7696. void bnx2x_set_rx_mode(struct net_device *dev)
  7697. {
  7698. struct bnx2x *bp = netdev_priv(dev);
  7699. u32 rx_mode = BNX2X_RX_MODE_NORMAL;
  7700. if (bp->state != BNX2X_STATE_OPEN) {
  7701. DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
  7702. return;
  7703. }
  7704. DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
  7705. if (dev->flags & IFF_PROMISC)
  7706. rx_mode = BNX2X_RX_MODE_PROMISC;
  7707. else if (dev->flags & IFF_ALLMULTI)
  7708. rx_mode = BNX2X_RX_MODE_ALLMULTI;
  7709. else {
  7710. /* some multicasts */
  7711. if (bnx2x_set_mc_list(bp))
  7712. rx_mode = BNX2X_RX_MODE_ALLMULTI;
  7713. /* some unicasts */
  7714. if (bnx2x_set_uc_list(bp))
  7715. rx_mode = BNX2X_RX_MODE_PROMISC;
  7716. }
  7717. bp->rx_mode = rx_mode;
  7718. bnx2x_set_storm_rx_mode(bp);
  7719. }
  7720. /* called with rtnl_lock */
  7721. static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
  7722. int devad, u16 addr)
  7723. {
  7724. struct bnx2x *bp = netdev_priv(netdev);
  7725. u16 value;
  7726. int rc;
  7727. DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
  7728. prtad, devad, addr);
  7729. /* The HW expects different devad if CL22 is used */
  7730. devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
  7731. bnx2x_acquire_phy_lock(bp);
  7732. rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
  7733. bnx2x_release_phy_lock(bp);
  7734. DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
  7735. if (!rc)
  7736. rc = value;
  7737. return rc;
  7738. }
  7739. /* called with rtnl_lock */
  7740. static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
  7741. u16 addr, u16 value)
  7742. {
  7743. struct bnx2x *bp = netdev_priv(netdev);
  7744. int rc;
  7745. DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
  7746. " value 0x%x\n", prtad, devad, addr, value);
  7747. /* The HW expects different devad if CL22 is used */
  7748. devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
  7749. bnx2x_acquire_phy_lock(bp);
  7750. rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
  7751. bnx2x_release_phy_lock(bp);
  7752. return rc;
  7753. }
  7754. /* called with rtnl_lock */
  7755. static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  7756. {
  7757. struct bnx2x *bp = netdev_priv(dev);
  7758. struct mii_ioctl_data *mdio = if_mii(ifr);
  7759. DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
  7760. mdio->phy_id, mdio->reg_num, mdio->val_in);
  7761. if (!netif_running(dev))
  7762. return -EAGAIN;
  7763. return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
  7764. }
  7765. #ifdef CONFIG_NET_POLL_CONTROLLER
  7766. static void poll_bnx2x(struct net_device *dev)
  7767. {
  7768. struct bnx2x *bp = netdev_priv(dev);
  7769. disable_irq(bp->pdev->irq);
  7770. bnx2x_interrupt(bp->pdev->irq, dev);
  7771. enable_irq(bp->pdev->irq);
  7772. }
  7773. #endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
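
/*
 * bnx2x_init_dev() prepares one PCI function for use: it enables the
 * device, validates and maps the register (BAR0) and doorbell (BAR2)
 * windows, selects a 64- or 32-bit DMA mask, clears the PXP2 indirect
 * address registers, and fills in the net_device ops, feature flags and
 * MDIO callbacks.  On any failure it unwinds via the err_out_* labels.
 */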
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
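
/*
 * bnx2x_check_firmware() sanity-checks the loaded firmware blob: every
 * section's offset + length must stay within the file, every init_ops
 * offset must point at a valid op, and the embedded version bytes must
 * match the FW version this driver was built against.
 */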
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;

	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/**
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
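
/*
 * bnx2x_init_firmware() selects the firmware file that matches the chip
 * (E1, E1H or E2), requests and validates it, and builds host-order
 * copies of the init data, init ops, op offsets and IRO array.  The
 * per-STORM PRAM and interrupt-table pointers reference the raw blob
 * directly.  Allocations are unwound in reverse order on failure.
 */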
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}
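
/*
 * bnx2x_init_one() is the PCI probe entry point: it picks the fastpath
 * context count for the board type, allocates a multi-queue net_device,
 * initializes the device and driver state, chooses the interrupt mode,
 * registers the netdev and reports the negotiated PCIe width and speed.
 */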
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x */
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;
#endif

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
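
/*
 * bnx2x_eeh_nic_unload() is the minimal teardown used by the PCI error
 * (EEH) handlers below: it marks the device as failed, stops the
 * interface and timers, releases the IRQs and frees the RX/TX resources
 * so a subsequent slot reset can reload from a clean state.
 */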
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
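
/*
 * bnx2x_eeh_recover() re-reads the shared-memory base after a slot
 * reset.  If the shmem pointer is missing or out of range the MCP is
 * flagged as inactive (NO_MCP_FLAG); otherwise the validity signature
 * is checked and the current firmware sequence number is restored.
 */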
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
						vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be no more than 8 L2 and no more than 8 L5 SPEs.
		 * We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
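
/*
 * bnx2x_cnic_sp_queue() is the callback CNIC uses to submit 16-byte
 * kwqes: each entry is copied onto the driver's kwq ring, and posting
 * to the slow-path queue is kicked if SPQ credit is still available.
 * The return value is the number of entries actually accepted.
 */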
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_mutex));
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}
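
/*
 * bnx2x_drv_ctl() is the control hook exposed to CNIC: it writes ILT
 * context-table entries, returns SPQ credit, and starts or stops the
 * iSCSI L2 ring (programming the corresponding MAC filters).
 */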
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case)).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
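
/*
 * bnx2x_setup_cnic_irq_info() describes the interrupt resources CNIC
 * should use: whether MSI-X is in use, which vector to take, and which
 * status blocks (E1x or E2 layout) back the CNIC and default SBs.
 */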
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
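
/*
 * bnx2x_cnic_probe() fills in and returns the cnic_eth_dev that the CNIC
 * module uses to attach to this device: register and doorbell mappings,
 * ILT/context layout, initial CIDs and the driver callbacks defined
 * above.  It returns NULL when neither iSCSI nor FCoE is available.
 */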
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate to CNIC that it should not try to work
	 * with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */