qib_iba7322.c

/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"
#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
	struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
	u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
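/*
 * Illustrative example (not from the original source): BMASK(7, 4) builds
 * a mask covering bits 7..4:
 * ((1 << (7 + 1 - 4)) - 1) << 4 == 0xf << 4 == 0xf0.
 */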
/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation, \
	"attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
	.string = txselect_list,
	.maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
	&kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect, \
	"Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
	BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
	BOARD_QME7342)

#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
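/*
 * Illustrative note: chip registers are u64-sized, so these macros turn a
 * byte offset from the machine-generated header into an index suitable for
 * a u64 __iomem * base; e.g. a hypothetical offset of 0x180 becomes index
 * 0x180 / sizeof(u64) == 48.
 */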
#define MASK_ACROSS(lsb, msb) \
	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
#define SYM_RMASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK)
#define SYM_MASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK << \
	QIB_7322_##regname##_##fldname##_LSB)
#define SYM_FIELD(value, regname, fldname) ((u64) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	SYM_RMASK(regname, fldname)))
/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
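/*
 * Illustrative example (hypothetical field): with LSB == 5 and RMASK == 0x3
 * (a 2-bit field), SYM_FIELD(value, reg, fld) is (value >> 5) & 0x3, i.e.
 * bits 6:5 right-justified, while SYM_MASK(reg, fld) is the in-place mask
 * 0x3 << 5 == 0x60.
 */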
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)

#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

/*
 * the size bits give us 2^N, in KB units. 0 marks as invalid,
 * and 7 is reserved. We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
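/*
 * Illustrative encoding example: since BufSize holds N for 2^N KB,
 * IBA7322_TID_SZ_2K writes N == 1 (2^1 KB == 2KB) and IBA7322_TID_SZ_4K
 * writes N == 2 (2^2 KB == 4KB); 0 is invalid and 7 reserved, per above.
 */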
#define SendIBSLIDAssignMask \
	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
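/*
 * Illustrative arithmetic (assuming the usual QIB_IB_SDR/DDR/QDR values of
 * 1, 2 and 4 from qib.h): PORT_SPD_CAP == 0x7, so DUAL_PORT_CAP ==
 * 0x7 | (0x7 << 3) == 0x3f, i.e. all three speeds advertised on both ports.
 */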
/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */
/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers. Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context. Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses. Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS ( \
	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
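/*
 * Illustrative W1C note: writing 1 to a GenMismatch/SeqMismatch bit clears
 * it and writing 0 leaves it unchanged, so writing TIDFLOW_ERRBITS back to
 * a flow table entry clears both latched errors without disturbing the
 * other fields.
 */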
/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
	((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
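/*
 * Illustrative arithmetic: with LBIntCnt at offset 0 of the group,
 * CREG_IDX(regname) is the counter's u64 index within the group; a
 * hypothetical counter 0x28 bytes past LBIntCnt gets index
 * 0x28 / sizeof(u64) == 5.
 */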
#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
	QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
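/*
 * Illustrative sketch (not the driver's literal code; see
 * qib_7322_setpbc_control() for the real packing) of how these fields
 * compose a PBC word:
 *
 *	pbc |= (u64)(vl & PBC_VL_NUM_RMASK) << PBC_VL_NUM_LSB;
 *	pbc |= (u64)(port & PBC_PORT_SEL_RMASK) << PBC_PORT_SEL_LSB;
 *
 * with PBC_7322_VL15_SEND OR'd in for VL15 packets, which bypass the
 * credit check.
 */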
static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};
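/*
 * Illustrative note: these delays appear to scale inversely with link rate
 * relative to 40Gbps (40/2.5 == 16, 40/10 == 4, ...), with 30Gbps rounded
 * up to 2; rates not listed stay 0.
 */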

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED 0x00
#define IB_7322_LT_STATE_LINKUP 0x01
#define IB_7322_LT_STATE_POLLACTIVE 0x02
#define IB_7322_LT_STATE_POLLQUIET 0x03
#define IB_7322_LT_STATE_SLEEPDELAY 0x04
#define IB_7322_LT_STATE_SLEEPQUIET 0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
#define IB_7322_LT_STATE_CFGIDLE 0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
#define IB_7322_LT_STATE_TXREVLANES 0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
#define IB_7322_LT_STATE_CFGWAITENH 0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN 0x0
#define IB_7322_L_STATE_INIT 0x1
#define IB_7322_L_STATE_ARM 0x2
#define IB_7322_L_STATE_ACTIVE 0x3
#define IB_7322_L_STATE_ACT_DEFER 0x4

static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
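
/*
 * Illustrative note (not from the original source): per C designated
 * initializer rules, any of the 0x20 slots not listed above are
 * zero-filled, so an unknown link training state maps to
 * physportstate 0.
 */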

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
	int rcv;
	void *arg;
	struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 main_int_mask; /* clear bits which have dedicated handlers */
	u64 int_enable_mask; /* for per port interrupts in single port mode */
	u64 errormask;
	u64 hwerrmask;
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	u32 ncntrs;
	u32 nportcntrs;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 numctxts;
	u32 rcvegrcnt;
	u32 updthresh; /* current AvailUpdThld */
	u32 updthresh_dflt; /* default AvailUpdThld */
	u32 r1;
	int irq;
	u32 num_msix_entries;
	u32 sdmabufcnt;
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	u32 dca_ctrl;
	int rhdr_cpu[18];
	int sdma_cpu[2];
	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
	struct qib_msix_entry *msix_entries;
	unsigned long *sendchkenable;
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of Tx emphasis ("txdds") entries, in human readable form. */
struct txdds_ent {
	u8 amp;
	u8 pre;
	u8 main;
	u8 post;
};

struct vendor_txdds_ent {
	u8 oui[QSFP_VOUI_LEN];
	u8 *partnum;
	struct txdds_ent sdr;
	struct txdds_ent ddr;
	struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
	+ ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

struct qib_chippport_specific {
	u64 __iomem *kpregbase;
	u64 __iomem *cpregbase;
	u64 *portcntrs;
	struct qib_pportdata *ppd;
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;
	/*
	 * these 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors. They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
	unsigned long qdr_dfe_time;
	unsigned long chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;
	/*
	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	 * entry zero is unused, to simplify indexing
	 */
	u8 h1_val;
	u8 no_eep; /* txselect table index to use if no qsfp info */
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192]; /* for port error interrupt msg buffer */
	char sdmamsgbuf[192]; /* for per-port sdma error messages */
};

static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port; /* 0 if not port-specific, else port # */
	int dca;
} irq_table[] = {
	{ "", qib_7322intr, -1, 0, 0 },
	{ " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
	{ " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
	{ " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
	{ " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
	{ " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
	{ " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
	{ " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
	{ " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
	{ " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};
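
/*
 * Illustrative note (not from the original source): each entry above
 * names an interrupt-vector suffix, its handler, the bit position of
 * that interrupt in IntStatus (-1 for the catch-all first vector), the
 * owning port (0 for non-port-specific), and whether the vector
 * participates in DCA; presumably the interrupt setup code walks this
 * table when requesting MSI-X vectors.
 */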

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
	int shadow_inx;
	int lsb;
	u64 mask;
	u16 regno;
} dca_rcvhdr_reg_map[] = {
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
	  ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
	  ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
	  ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
	  ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH), KREG_IDX(DCACtrlB) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
	  ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
	  ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
	  ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
	  ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH), KREG_IDX(DCACtrlC) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
	  ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
	  ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
	  ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
	  ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH), KREG_IDX(DCACtrlD) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
	  ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
	  ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
	  ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
	  ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH), KREG_IDX(DCACtrlE) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
	  ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH), KREG_IDX(DCACtrlF) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
	  ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH), KREG_IDX(DCACtrlF) },
};
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
#endif

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on error (a read error is not distinguishable from valid
 * contents at runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}
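
/*
 * Illustrative note (not from the original source): the user register
 * block is either its own mapping (dd->userbase) or lives dd->uregbase
 * bytes into the kernel register mapping; context N's copy starts
 * dd->ureg_align * N bytes in, and regno then indexes 64-bit registers
 * within it, so e.g. ctxt 0, regno 0 reads the first u64 of the block.
 */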

/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on error (a read error is not distinguishable from valid
 * contents at runtime; we may add a separate error variable at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
				enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *) dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *) dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}

/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
				       const u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}
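
/*
 * Illustrative note (not from the original source): per-context kernel
 * registers are laid out as consecutive 64-bit registers, one per
 * context, so writing context N's copy is simply a write to regno + N;
 * e.g. qib_write_kreg_ctxt(dd, krc_rcvhdraddr, 3, pa) would land on the
 * fourth register of that group (krc_rcvhdraddr is hypothetical here).
 */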

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	INT_MASK_P(SDmaProgress, pidx) | \
	INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
	QIB_I_SPIOSENT | \
	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
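
/*
 * Illustrative note (not from the original source): "BITSEXTANT" here
 * appears to mean "every interrupt bit that actually exists on this
 * chip" - the device-common bits plus both ports' per-port bits - which
 * is useful for masking off anything an IntStatus read could never
 * legitimately assert.
 */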

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per chip (rather than per-port) errors. Most either do nothing but
 * trigger a print (because they self-recover, or always occur in tandem
 * with other errors that handle the issue), or indicate errors with no
 * recovery that we still want to know happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/*
 * SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism. This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * The below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed.
 */
#define QIB_E_P_SPKTERRS (\
	QIB_E_P_SUNEXP_PKTNUM |\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMAXPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
	QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
	ERR_MASK_N(SendUnsupportedVLErr) | \
	QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAUNEXPDATA | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
	)

/*
 * These are errors that can occur when the link changes state while a
 * packet is being sent or received. This doesn't cover things like EBP
 * or VCRC that can be the result of the sender having the link change
 * state, so we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
	QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise, neuter E_SPKT_ERRS_IGNORE */
#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
	SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
	SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
	ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled). It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
	fldname##Mask##_##port), .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
	HWE_AUTO(PCIESerdesPClkNotDetect),
	HWE_AUTO(PowerOnBISTFailed),
	HWE_AUTO(TempsenseTholdReached),
	HWE_AUTO(MemoryErr),
	HWE_AUTO(PCIeBusParityErr),
	HWE_AUTO(PcieCplTimeout),
	HWE_AUTO(PciePoisonedTLP),
	HWE_AUTO_P(SDmaMemReadErr, 1),
	HWE_AUTO_P(SDmaMemReadErr, 0),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
	HWE_AUTO(statusValidNoEop),
	HWE_AUTO(LATriggered),
	{ .mask = 0, .sz = 0 }
};

#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
	E_AUTO(RcvEgrFullErr),
	E_AUTO(RcvHdrFullErr),
	E_AUTO(ResetNegated),
	E_AUTO(HardwareErr),
	E_AUTO(InvalidAddrErr),
	E_AUTO(SDmaVL15Err),
	E_AUTO(SBufVL15MisUseErr),
	E_AUTO(InvalidEEPCmd),
	E_AUTO(RcvContextShareErr),
	E_AUTO(SendVLMismatchErr),
	E_AUTO(SendArmLaunchErr),
	E_AUTO(SendSpecialTriggerErr),
	E_AUTO(SDmaWrongPortErr),
	E_AUTO(SDmaBufMaskDuplicateErr),
	{ .mask = 0, .sz = 0 }
};

static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
	E_P_AUTO(IBStatusChanged),
	E_P_AUTO(SHeadersErr),
	E_P_AUTO(VL15BufMisuseErr),
	/*
	 * SDmaHaltErr is not really an error, so make the message clearer:
	 */
	{ .mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
	  .sz = 11 },
	E_P_AUTO(SDmaDescAddrMisalignErr),
	E_P_AUTO(SDmaUnexpDataErr),
	E_P_AUTO(SDmaMissingDwErr),
	E_P_AUTO(SDmaDwEnErr),
	E_P_AUTO(SDmaRpyTagErr),
	E_P_AUTO(SDma1stDescErr),
	E_P_AUTO(SDmaBaseErr),
	E_P_AUTO(SDmaTailOutOfBoundErr),
	E_P_AUTO(SDmaOutOfBoundErr),
	E_P_AUTO(SDmaGenMismatchErr),
	E_P_AUTO(SendBufMisuseErr),
	E_P_AUTO(SendUnsupportedVLErr),
	E_P_AUTO(SendUnexpectedPktNumErr),
	E_P_AUTO(SendDroppedDataPktErr),
	E_P_AUTO(SendDroppedSmpPktErr),
	E_P_AUTO(SendPktLenErr),
	E_P_AUTO(SendUnderRunErr),
	E_P_AUTO(SendMaxPktLenErr),
	E_P_AUTO(SendMinPktLenErr),
	E_P_AUTO(RcvIBLostLinkErr),
	E_P_AUTO(RcvHdrErr),
	E_P_AUTO(RcvHdrLenErr),
	E_P_AUTO(RcvBadTidErr),
	E_P_AUTO(RcvBadVersionErr),
	E_P_AUTO(RcvIBFlowErr),
	E_P_AUTO(RcvEBPErr),
	E_P_AUTO(RcvUnsupportedVLErr),
	E_P_AUTO(RcvUnexpectedCharErr),
	E_P_AUTO(RcvShortPktLenErr),
	E_P_AUTO(RcvLongPktLenErr),
	E_P_AUTO(RcvMaxPktLenErr),
	E_P_AUTO(RcvMinPktLenErr),
	E_P_AUTO(RcvICRCErr),
	E_P_AUTO(RcvVCRCErr),
	E_P_AUTO(RcvFormatErr),
	{ .mask = 0, .sz = 0 }
};

/*
 * Below generates "auto-message" for interrupts not specific to any port
 * or context.
 */
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_0), \
	SYM_LSB(IntMask, fldname##Mask##_1)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_1), \
	SYM_LSB(IntMask, fldname##Mask##_0)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
 * Below generates "auto-message" for interrupts specific to a context,
 * with ctxt-number appended
 */
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##0IntMask), \
	SYM_LSB(IntMask, fldname##17IntMask)), \
	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }

static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
	INTR_AUTO_P(SDmaInt),
	INTR_AUTO_P(SDmaProgressInt),
	INTR_AUTO_P(SDmaIdleInt),
	INTR_AUTO_P(SDmaCleanupDone),
	INTR_AUTO_C(RcvUrg),
	INTR_AUTO_P(ErrInt),
	INTR_AUTO(ErrInt), /* non-port-specific errs */
	INTR_AUTO(AssertGPIOInt),
	INTR_AUTO_P(SendDoneInt),
	INTR_AUTO(SendBufAvailInt),
	INTR_AUTO_C(RcvAvail),
	{ .mask = 0, .sz = 0 }
};

#define TXSYMPTOM_AUTO_P(fldname) \
	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
	.msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
	TXSYMPTOM_AUTO_P(NonKeyPacket),
	TXSYMPTOM_AUTO_P(GRHFail),
	TXSYMPTOM_AUTO_P(PkeyFail),
	TXSYMPTOM_AUTO_P(QPFail),
	TXSYMPTOM_AUTO_P(SLIDFail),
	TXSYMPTOM_AUTO_P(RawIPV6),
	TXSYMPTOM_AUTO_P(PacketTooSmall),
	{ .mask = 0, .sz = 0 }
};

#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
 * we don't need to force the update of pioavail.
 */
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 i;
	int any;
	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long sbuf[4];

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	any = 0;
	for (i = 0; i < regcnt; ++i) {
		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
		if (sbuf[i]) {
			any = 1;
			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
		}
	}

	if (any)
		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
}
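
/*
 * Illustrative note (not from the original source): kr_sendbuffererror
 * appears to be a bitmask, one bit per PIO buffer spread over
 * consecutive 64-bit registers, of buffers with send errors; writing a
 * set bit back clears it. The sbuf[4] scratch array therefore bounds
 * this routine to 4 * BITS_PER_LONG buffers, which covers
 * piobcnt2k + piobcnt4k + NUM_VL15_BUFS on this chip.
 */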

/* No txe_recover yet, if ever */

/* No decode__errors yet */
static void err_decode(char *msg, size_t len, u64 errs,
		       const struct qib_hwerror_msgs *msp)
{
	u64 these, lmask;
	int took, multi, n = 0;

	while (errs && msp && msp->mask) {
		multi = (msp->mask & (msp->mask - 1));
		while (errs & msp->mask) {
			these = (errs & msp->mask);
			lmask = (these & (these - 1)) ^ these;
			if (len) {
				if (n++) {
					/* separate the strings */
					*msg++ = ',';
					len--;
				}
				BUG_ON(!msp->sz);
				/* msp->sz counts the nul */
				took = min_t(size_t, msp->sz - (size_t)1, len);
				memcpy(msg, msp->msg, took);
				len -= took;
				msg += took;
				if (len)
					*msg = '\0';
			}
			errs &= ~lmask;
			if (len && multi) {
				/* More than one bit this mask */
				int idx = -1;

				while (lmask & msp->mask) {
					++idx;
					lmask >>= 1;
				}
				took = scnprintf(msg, len, "_%d", idx);
				len -= took;
				msg += took;
			}
		}
		++msp;
	}
	/* If some bits are left, show in hex. */
	if (len && errs)
		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
			 (unsigned long long) errs);
}
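
/*
 * Illustrative note (not from the original source): given, say,
 * errs = QIB_E_RESET | QIB_E_HARDWARE and msp = qib_7322error_msgs,
 * err_decode() would leave "ResetNegated,HardwareErr" in msg (table
 * order); a multi-bit mask entry gets a numeric "_<n>" suffix per set
 * bit, and any bits with no table entry are appended as ",MORE:<hex>".
 */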

/* only called if r1 set */
static void flush_fifo(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *piobuf;
	u32 bufn;
	u32 *hdr;
	u64 pbc;
	const unsigned hdrwords = 7;
	static struct qib_ib_header ibhdr = {
		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
		.lrh[1] = IB_LID_PERMISSIVE,
		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
		.lrh[3] = IB_LID_PERMISSIVE,
		.u.oth.bth[0] = cpu_to_be32(
			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
		.u.oth.bth[1] = cpu_to_be32(0),
		.u.oth.bth[2] = cpu_to_be32(0),
		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
	};

	/*
	 * Send a dummy VL15 packet to flush the launch FIFO.
	 * This will not actually be sent since the TxeBypassIbc bit is set.
	 */
	pbc = PBC_7322_VL15_SEND |
		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
		(hdrwords + SIZE_OF_CRC);
	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
	if (!piobuf)
		return;
	writeq(pbc, piobuf);
	hdr = (u32 *) &ibhdr;
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
		qib_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
		qib_flush_wc();
	} else
		qib_pio_copy(piobuf + 2, hdr, hdrwords);
	qib_sendbuf_done(dd, bufn);
}
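
/*
 * Illustrative note (not from the original source): the PBC built above
 * packs the packet length in dwords (header words plus CRC) in the low
 * bits, the destination port at bit PBC_PORT_SEL_LSB of the upper
 * 32-bit word, and the VL15 "no credit check" flag in bit 63, matching
 * the PBC_7322_VL15_SEND and PBC_PORT_SEL_* definitions earlier in the
 * file.
 */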

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 set_sendctrl = 0;
	u64 clr_sendctrl = 0;

	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);

	spin_lock(&dd->sendctrl_lock);

	/* If we are draining everything, block sends first */
	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	ppd->p_sendctrl |= set_sendctrl;
	ppd->p_sendctrl &= ~clr_sendctrl;

	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
		qib_write_kreg_port(ppd, krp_sendctrl,
				    ppd->p_sendctrl |
				    SYM_MASK(SendCtrl_0, SDmaCleanup));
	else
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock(&dd->sendctrl_lock);

	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
		flush_fifo(ppd);
}

static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}

static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
	/*
	 * Set SendDmaLenGen and clear and set
	 * the MSB of the generation count to enable generation checking
	 * and load the internal generation counter.
	 */
	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg_port(ppd, krp_senddmalengen,
			    ppd->sdma_descq_cnt |
			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}
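
/*
 * Illustrative note (not from the original source): writing the length
 * once with the Generation MSB clear and then again with it set is
 * presumably what produces the clear-then-set edge that loads the
 * internal generation counter; a single write with the MSB already set
 * would not create that edge.
 */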
  1394. /*
  1395. * Must be called with sdma_lock held, or before init finished.
  1396. */
  1397. static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
  1398. {
  1399. /* Commit writes to memory and advance the tail on the chip */
  1400. wmb();
  1401. ppd->sdma_descq_tail = tail;
  1402. qib_write_kreg_port(ppd, krp_senddmatail, tail);
  1403. }
  1404. /*
  1405. * This is called with interrupts disabled and sdma_lock held.
  1406. */
  1407. static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
  1408. {
  1409. /*
  1410. * Drain all FIFOs.
  1411. * The hardware doesn't require this but we do it so that verbs
  1412. * and user applications don't wait for link active to send stale
  1413. * data.
  1414. */
  1415. sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
  1416. qib_sdma_7322_setlengen(ppd);
  1417. qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
  1418. ppd->sdma_head_dma[0] = 0;
  1419. qib_7322_sdma_sendctrl(ppd,
  1420. ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
  1421. }
  1422. #define DISABLES_SDMA ( \
  1423. QIB_E_P_SDMAHALT | \
  1424. QIB_E_P_SDMADESCADDRMISALIGN | \
  1425. QIB_E_P_SDMAMISSINGDW | \
  1426. QIB_E_P_SDMADWEN | \
  1427. QIB_E_P_SDMARPYTAG | \
  1428. QIB_E_P_SDMA1STDESC | \
  1429. QIB_E_P_SDMABASE | \
  1430. QIB_E_P_SDMATAILOUTOFBOUND | \
  1431. QIB_E_P_SDMAOUTOFBOUND | \
  1432. QIB_E_P_SDMAGENMISMATCH)
  1433. static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
  1434. {
  1435. unsigned long flags;
  1436. struct qib_devdata *dd = ppd->dd;
  1437. errs &= QIB_E_P_SDMAERRS;
  1438. err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
  1439. errs, qib_7322p_error_msgs);
  1440. if (errs & QIB_E_P_SDMAUNEXPDATA)
  1441. qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
  1442. ppd->port);
  1443. spin_lock_irqsave(&ppd->sdma_lock, flags);
  1444. if (errs != QIB_E_P_SDMAHALT) {
  1445. /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
  1446. qib_dev_porterr(dd, ppd->port,
  1447. "SDMA %s 0x%016llx %s\n",
  1448. qib_sdma_state_names[ppd->sdma_state.current_state],
  1449. errs, ppd->cpspec->sdmamsgbuf);
  1450. dump_sdma_7322_state(ppd);
  1451. }
  1452. switch (ppd->sdma_state.current_state) {
  1453. case qib_sdma_state_s00_hw_down:
  1454. break;
  1455. case qib_sdma_state_s10_hw_start_up_wait:
  1456. if (errs & QIB_E_P_SDMAHALT)
  1457. __qib_sdma_process_event(ppd,
  1458. qib_sdma_event_e20_hw_started);
  1459. break;
  1460. case qib_sdma_state_s20_idle:
  1461. break;
  1462. case qib_sdma_state_s30_sw_clean_up_wait:
  1463. break;
  1464. case qib_sdma_state_s40_hw_clean_up_wait:
  1465. if (errs & QIB_E_P_SDMAHALT)
  1466. __qib_sdma_process_event(ppd,
  1467. qib_sdma_event_e50_hw_cleaned);
  1468. break;
  1469. case qib_sdma_state_s50_hw_halt_wait:
  1470. if (errs & QIB_E_P_SDMAHALT)
  1471. __qib_sdma_process_event(ppd,
  1472. qib_sdma_event_e60_hw_halted);
  1473. break;
  1474. case qib_sdma_state_s99_running:
  1475. __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
  1476. __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
  1477. break;
  1478. }
  1479. spin_unlock_irqrestore(&ppd->sdma_lock, flags);
  1480. }
  1481. /*
  1482. * handle per-device errors (not per-port errors)
  1483. */
  1484. static noinline void handle_7322_errors(struct qib_devdata *dd)
  1485. {
  1486. char *msg;
  1487. u64 iserr = 0;
  1488. u64 errs;
  1489. u64 mask;
  1490. int log_idx;
  1491. qib_stats.sps_errints++;
  1492. errs = qib_read_kreg64(dd, kr_errstatus);
  1493. if (!errs) {
  1494. qib_devinfo(dd->pcidev,
  1495. "device error interrupt, but no error bits set!\n");
  1496. goto done;
  1497. }
  1498. /* don't report errors that are masked */
  1499. errs &= dd->cspec->errormask;
  1500. msg = dd->cspec->emsgbuf;
  1501. /* do these first, they are most important */
  1502. if (errs & QIB_E_HARDWARE) {
  1503. *msg = '\0';
  1504. qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
  1505. } else
  1506. for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
  1507. if (errs & dd->eep_st_masks[log_idx].errs_to_log)
  1508. qib_inc_eeprom_err(dd, log_idx, 1);
  1509. if (errs & QIB_E_SPKTERRS) {
  1510. qib_disarm_7322_senderrbufs(dd->pport);
  1511. qib_stats.sps_txerrs++;
  1512. } else if (errs & QIB_E_INVALIDADDR)
  1513. qib_stats.sps_txerrs++;
  1514. else if (errs & QIB_E_ARMLAUNCH) {
  1515. qib_stats.sps_txerrs++;
  1516. qib_disarm_7322_senderrbufs(dd->pport);
  1517. }
  1518. qib_write_kreg(dd, kr_errclear, errs);
  1519. /*
  1520. * The ones we mask off are handled specially below
  1521. * or above. Also mask SDMADISABLED by default as it
  1522. * is too chatty.
  1523. */
  1524. mask = QIB_E_HARDWARE;
  1525. *msg = '\0';
  1526. err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
  1527. qib_7322error_msgs);
  1528. /*
  1529. * Getting reset is a tragedy for all ports. Mark the device
  1530. * _and_ the ports as "offline" in way meaningful to each.
  1531. */
  1532. if (errs & QIB_E_RESET) {
  1533. int pidx;
  1534. qib_dev_err(dd,
  1535. "Got reset, requires re-init (unload and reload driver)\n");
  1536. dd->flags &= ~QIB_INITTED; /* needs re-init */
  1537. /* mark as having had error */
  1538. *dd->devstatusp |= QIB_STATUS_HWERROR;
  1539. for (pidx = 0; pidx < dd->num_pports; ++pidx)
  1540. if (dd->pport[pidx].link_speed_supported)
  1541. *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
  1542. }
  1543. if (*msg && iserr)
  1544. qib_dev_err(dd, "%s error\n", msg);
  1545. /*
  1546. * If there were hdrq or egrfull errors, wake up any processes
  1547. * waiting in poll. We used to try to check which contexts had
  1548. * the overflow, but given the cost of that and the chip reads
  1549. * to support it, it's better to just wake everybody up if we
  1550. * get an overflow; waiters can poll again if it's not them.
  1551. */
  1552. if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
  1553. qib_handle_urcv(dd, ~0U);
  1554. if (errs & ERR_MASK(RcvEgrFullErr))
  1555. qib_stats.sps_buffull++;
  1556. else
  1557. qib_stats.sps_hdrfull++;
  1558. }
  1559. done:
  1560. return;
  1561. }
  1562. static void qib_error_tasklet(unsigned long data)
  1563. {
  1564. struct qib_devdata *dd = (struct qib_devdata *)data;
  1565. handle_7322_errors(dd);
  1566. qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
  1567. }
  1568. static void reenable_chase(unsigned long opaque)
  1569. {
  1570. struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
  1571. ppd->cpspec->chase_timer.expires = 0;
  1572. qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
  1573. QLOGIC_IB_IBCC_LINKINITCMD_POLL);
  1574. }
  1575. static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
  1576. u8 ibclt)
  1577. {
  1578. ppd->cpspec->chase_end = 0;
  1579. if (!qib_chase)
  1580. return;
  1581. qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
  1582. QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
  1583. ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
  1584. add_timer(&ppd->cpspec->chase_timer);
  1585. }
  1586. static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
  1587. {
  1588. u8 ibclt;
  1589. unsigned long tnow;
  1590. ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
  1591. /*
  1592. * Detect and handle the state chase issue, where we can
  1593. * get stuck if we are unlucky on timing on both sides of
  1594. * the link. If we are, we disable, set a timer, and
  1595. * then re-enable.
  1596. */
  1597. switch (ibclt) {
  1598. case IB_7322_LT_STATE_CFGRCVFCFG:
  1599. case IB_7322_LT_STATE_CFGWAITRMT:
  1600. case IB_7322_LT_STATE_TXREVLANES:
  1601. case IB_7322_LT_STATE_CFGENH:
  1602. tnow = jiffies;
  1603. if (ppd->cpspec->chase_end &&
  1604. time_after(tnow, ppd->cpspec->chase_end))
  1605. disable_chase(ppd, tnow, ibclt);
  1606. else if (!ppd->cpspec->chase_end)
  1607. ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
  1608. break;
  1609. default:
  1610. ppd->cpspec->chase_end = 0;
  1611. break;
  1612. }
  1613. if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
  1614. ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
  1615. ibclt == IB_7322_LT_STATE_LINKUP) &&
  1616. (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
  1617. force_h1(ppd);
  1618. ppd->cpspec->qdr_reforce = 1;
  1619. if (!ppd->dd->cspec->r1)
  1620. serdes_7322_los_enable(ppd, 0);
  1621. } else if (ppd->cpspec->qdr_reforce &&
  1622. (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
  1623. (ibclt == IB_7322_LT_STATE_CFGENH ||
  1624. ibclt == IB_7322_LT_STATE_CFGIDLE ||
  1625. ibclt == IB_7322_LT_STATE_LINKUP))
  1626. force_h1(ppd);
  1627. if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
  1628. ppd->link_speed_enabled == QIB_IB_QDR &&
  1629. (ibclt == IB_7322_LT_STATE_CFGTEST ||
  1630. ibclt == IB_7322_LT_STATE_CFGENH ||
  1631. (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
  1632. ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
  1633. adj_tx_serdes(ppd);
  1634. if (ibclt != IB_7322_LT_STATE_LINKUP) {
  1635. u8 ltstate = qib_7322_phys_portstate(ibcst);
  1636. u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
  1637. LinkTrainingState);
  1638. if (!ppd->dd->cspec->r1 &&
  1639. pibclt == IB_7322_LT_STATE_LINKUP &&
  1640. ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
  1641. ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
  1642. ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
  1643. ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
  1644. /* If the link went down (but no into recovery,
  1645. * turn LOS back on */
  1646. serdes_7322_los_enable(ppd, 1);
  1647. if (!ppd->cpspec->qdr_dfe_on &&
  1648. ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
  1649. ppd->cpspec->qdr_dfe_on = 1;
  1650. ppd->cpspec->qdr_dfe_time = 0;
  1651. /* On link down, reenable QDR adaptation */
  1652. qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
  1653. ppd->dd->cspec->r1 ?
  1654. QDR_STATIC_ADAPT_DOWN_R1 :
  1655. QDR_STATIC_ADAPT_DOWN);
  1656. pr_info(
  1657. "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
  1658. ppd->dd->unit, ppd->port, ibclt);
  1659. }
  1660. }
  1661. }
  1662. static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
  1663. /*
  1664. * This is per-pport error handling.
  1665. * will likely get it's own MSIx interrupt (one for each port,
  1666. * although just a single handler).
  1667. */
  1668. static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
  1669. {
  1670. char *msg;
  1671. u64 ignore_this_time = 0, iserr = 0, errs, fmask;
  1672. struct qib_devdata *dd = ppd->dd;
  1673. /* do this as soon as possible */
  1674. fmask = qib_read_kreg64(dd, kr_act_fmask);
  1675. if (!fmask)
  1676. check_7322_rxe_status(ppd);
  1677. errs = qib_read_kreg_port(ppd, krp_errstatus);
  1678. if (!errs)
  1679. qib_devinfo(dd->pcidev,
  1680. "Port%d error interrupt, but no error bits set!\n",
  1681. ppd->port);
  1682. if (!fmask)
  1683. errs &= ~QIB_E_P_IBSTATUSCHANGED;
  1684. if (!errs)
  1685. goto done;
  1686. msg = ppd->cpspec->epmsgbuf;
  1687. *msg = '\0';
  1688. if (errs & ~QIB_E_P_BITSEXTANT) {
  1689. err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
  1690. errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
  1691. if (!*msg)
  1692. snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
  1693. "no others");
  1694. qib_dev_porterr(dd, ppd->port,
  1695. "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
  1696. (errs & ~QIB_E_P_BITSEXTANT), msg);
  1697. *msg = '\0';
  1698. }
  1699. if (errs & QIB_E_P_SHDR) {
  1700. u64 symptom;
  1701. /* determine cause, then write to clear */
  1702. symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
  1703. qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
  1704. err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
  1705. hdrchk_msgs);
  1706. *msg = '\0';
  1707. /* senderrbuf cleared in SPKTERRS below */
  1708. }
  1709. if (errs & QIB_E_P_SPKTERRS) {
  1710. if ((errs & QIB_E_P_LINK_PKTERRS) &&
  1711. !(ppd->lflags & QIBL_LINKACTIVE)) {
  1712. /*
  1713. * This can happen when trying to bring the link
  1714. * up, but the IB link changes state at the "wrong"
  1715. * time. The IB logic then complains that the packet
  1716. * isn't valid. We don't want to confuse people, so
  1717. * we just don't print them, except at debug
  1718. */
  1719. err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
  1720. (errs & QIB_E_P_LINK_PKTERRS),
  1721. qib_7322p_error_msgs);
  1722. *msg = '\0';
  1723. ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
  1724. }
  1725. qib_disarm_7322_senderrbufs(ppd);
  1726. } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
  1727. !(ppd->lflags & QIBL_LINKACTIVE)) {
  1728. /*
  1729. * This can happen when SMA is trying to bring the link
  1730. * up, but the IB link changes state at the "wrong" time.
  1731. * The IB logic then complains that the packet isn't
  1732. * valid. We don't want to confuse people, so we just
  1733. * don't print them, except at debug
  1734. */
  1735. err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
  1736. qib_7322p_error_msgs);
  1737. ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
  1738. *msg = '\0';
  1739. }
  1740. qib_write_kreg_port(ppd, krp_errclear, errs);
  1741. errs &= ~ignore_this_time;
  1742. if (!errs)
  1743. goto done;
  1744. if (errs & QIB_E_P_RPKTERRS)
  1745. qib_stats.sps_rcverrs++;
  1746. if (errs & QIB_E_P_SPKTERRS)
  1747. qib_stats.sps_txerrs++;
  1748. iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
  1749. if (errs & QIB_E_P_SDMAERRS)
  1750. sdma_7322_p_errors(ppd, errs);
  1751. if (errs & QIB_E_P_IBSTATUSCHANGED) {
  1752. u64 ibcs;
  1753. u8 ltstate;
  1754. ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
  1755. ltstate = qib_7322_phys_portstate(ibcs);
  1756. if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
  1757. handle_serdes_issues(ppd, ibcs);
  1758. if (!(ppd->cpspec->ibcctrl_a &
  1759. SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
  1760. /*
  1761. * We got our interrupt, so init code should be
  1762. * happy and not try alternatives. Now squelch
  1763. * other "chatter" from link-negotiation (pre Init)
  1764. */
  1765. ppd->cpspec->ibcctrl_a |=
  1766. SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
  1767. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  1768. ppd->cpspec->ibcctrl_a);
  1769. }
  1770. /* Update our picture of width and speed from chip */
  1771. ppd->link_width_active =
  1772. (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
  1773. IB_WIDTH_4X : IB_WIDTH_1X;
  1774. ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
  1775. LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
  1776. SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
  1777. QIB_IB_DDR : QIB_IB_SDR;
  1778. if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
  1779. IB_PHYSPORTSTATE_DISABLED)
  1780. qib_set_ib_7322_lstate(ppd, 0,
  1781. QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
  1782. else
  1783. /*
  1784. * Since going into a recovery state causes the link
  1785. * state to go down and since recovery is transitory,
  1786. * it is better if we "miss" ever seeing the link
  1787. * training state go into recovery (i.e., ignore this
  1788. * transition for link state special handling purposes)
  1789. * without updating lastibcstat.
  1790. */
  1791. if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
  1792. ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
  1793. ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
  1794. ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
  1795. qib_handle_e_ibstatuschanged(ppd, ibcs);
  1796. }
  1797. if (*msg && iserr)
  1798. qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
  1799. if (ppd->state_wanted & ppd->lflags)
  1800. wake_up_interruptible(&ppd->state_wait);
  1801. done:
  1802. return;
  1803. }
/* enable/disable chip from delivering interrupts */
static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
		/* cause any pending enabled interrupts to be re-delivered */
		qib_write_kreg(dd, kr_intclear, 0ULL);
		if (dd->cspec->num_msix_entries) {
			/* and same for MSIx */
			u64 val = qib_read_kreg64(dd, kr_intgranted);

			if (val)
				qib_write_kreg(dd, kr_intgranted, val);
		}
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}
/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips.
 */
static void qib_7322_clear_freeze(struct qib_devdata *dd)
{
	int pidx;

	/* disable error interrupts, to avoid confusion */
	qib_write_kreg(dd, kr_errmask, 0ULL);

	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
	qib_7322_set_intr_state(dd, 0);

	/* clear the freeze, and be sure chip saw it */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);

	/*
	 * Force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends.  Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	/* We need to purge per-port errs and reset mask, too */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (!dd->pport[pidx].link_speed_supported)
			continue;
		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
	}
	qib_7322_set_intr_state(dd, 1);
}
/* no error handling to speak of */
/**
 * qib_7322_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Most hardware errors are catastrophic, but for right now we'll print
 * them and continue.  We reuse the same message buffer as
 * qib_handle_errors() to avoid excessive stack usage.
 */
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 ctrl;
	int isfatal = 0;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		qib_dev_err(dd,
			"Read of hardware error status failed (all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/* Always clear the error status register, except BIST fail */
	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
		       ~HWE_MASK(PowerOnBISTFailed));

	hwerrs &= dd->cspec->hwerrmask;

	/* no EEPROM logging, yet */

	if (hwerrs)
		qib_devinfo(dd->pcidev,
			"Hardware error: hwerr=0x%llx (cleared)\n",
			(unsigned long long) hwerrs);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
		/*
		 * No recovery yet...
		 */
		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
		    dd->cspec->stay_in_freeze) {
			/*
			 * If any bits we aren't ignoring are set, only make
			 * the complaint once, in case it's stuck or recurring
			 * and we get here multiple times.  Force link down,
			 * so switch knows, and LEDs are turned off.
			 */
			if (dd->flags & QIB_INITTED)
				isfatal = 1;
		} else
			qib_7322_clear_freeze(dd);
	}

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcpy(msg,
			"[Memory BIST test failed, InfiniPath hardware unusable]",
			msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);

	/* Ignore esoteric PLL failures et al. */

	qib_dev_err(dd, "%s hardware error\n", msg);

	if (hwerrs &
	    (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
	     SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
		int pidx = 0;
		int err;
		unsigned long flags;
		struct qib_pportdata *ppd = dd->pport;

		for (; pidx < dd->num_pports; ++pidx, ppd++) {
			err = 0;
			if (pidx == 0 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
				err++;
			if (pidx == 1 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
				err++;
			if (err) {
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				dump_sdma_7322_state(ppd);
				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			}
		}
	}

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd,
			"Fatal Hardware Error, no longer usable, SN %.16s\n",
			dd->serial);
		/*
		 * for /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}
/**
 * qib_7322_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask
 */
static void qib_7322_init_hwerrors(struct qib_devdata *dd)
{
	int pidx;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);
	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
			 QIB_EXTS_MEMBIST_ENDTEST)))
		qib_dev_err(dd, "MemBIST did not complete!\n");

	/* never clear BIST failure, so reported on each driver load */
	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    ~0ULL);
}
/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based.  There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
	} else
		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC)
		 */
		qib_7322_mini_pcs_reset(ppd);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
	}

	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
			    mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);
}
/*
 * The total RCV buffer memory is 64KB, used for both ports, and is
 * in units of 64 bytes (same as IB flow control credit unit).
 * The consumedVL unit in the same registers are in 32 byte units!
 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
 * in krp_rxcreditvl15, rather than 10.
 */
#define RCV_BUF_UNITSZ 64
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
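/* e.g., with both ports in use: (64 * 1024) / (64 * 2) = 512 units per port */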
static void set_vls(struct qib_pportdata *ppd)
{
	int i, numvls, totcred, cred_vl, vl0extra;
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	numvls = qib_num_vls(ppd->vls_operational);

	/*
	 * Set up per-VL credits. Below is kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 9 credits, for two max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
	 */
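	/*
	 * i.e., ceil((2 * 288) / 64) = ceil(576 / 64) = 9 units, matching
	 * the rounded-up division computing cred_vl below.
	 */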
	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
	totcred = NUM_RCV_BUF_UNITS(dd);
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	totcred -= cred_vl;
	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
	cred_vl = totcred / numvls;
	vl0extra = totcred - cred_vl * numvls;
	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
	for (i = 1; i < numvls; i++)
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
	for (; i < 8; i++) /* no buffer space for other VLs */
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);

	/* Notify IBC that credits need to be recalculated */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	for (i = 0; i < numvls; i++)
		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);

	/* Change the number of operational VLs */
	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
}
/*
 * The code that deals with actual SerDes is in serdes_7322_init().
 * Compared to the code for iba7220, it is minimal.
 */
static int serdes_7322_init(struct qib_pportdata *ppd);

/**
 * qib_7322_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 */
static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, guid, ibc;
	unsigned long flags;
	int ret = 0;

	/*
	 * SerDes model not in Pd, but still need to
	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
	 * eventually.
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	if (qib_compat_ddr_negotiate) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
						crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
						crp_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur.  Units are 128ns for this chip.
	 * Set to 3usec.
	 */
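	/* i.e., 24 * 128 ns = 3072 ns, just over 3 usec */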
	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
		SYM_LSB(IBCCtrlA_0, MaxPktLen);
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */

	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is still
	 * in reset from above).  Writes new value of ibcctrl_a as last step.
	 */
	qib_7322_mini_pcs_reset(ppd);

	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
							    krp_ibcctrl_b);
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_SPEED_DDR |
				IBA7322_IBC_SPEED_SDR |
				IBA7322_IBC_WIDTH_AUTONEG |
				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
				(lse << IBA7322_IBC_SPEED_LSB) |
				IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
		else
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
				IBA7322_IBC_SPEED_QDR |
					IBA7322_IBC_IBTA_1_2_MASK :
				(lse == QIB_IB_DDR) ?
					IBA7322_IBC_SPEED_DDR :
					IBA7322_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7322_IBC_WIDTH_4X_ONLY :
				IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
			IBA7322_IBC_HRTBT_MASK);
	}
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);

	serdes_7322_init(ppd);

	guid = be64_to_cpu(ppd->guid);
	if (!guid) {
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	}

	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

	/* Enable port */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	set_vls(ppd);

	/* initially come up DISABLED, without sending anything. */
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear the linkinit cmds */
	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);

	/* be paranoid against later code motion, etc. */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Also enable IBSTATUSCHG interrupt. */
	val = qib_read_kreg_port(ppd, krp_errmask);
	qib_write_kreg_port(ppd, krp_errmask,
		val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
	return ret;
}
/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	unsigned long flags;

	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);

	/*
	 * Despite the name, actually disables IBC as well. Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
	 * along with the PCS being reset.
	 */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_7322_mini_pcs_reset(ppd);

	/*
	 * Update the adjusted counters so the adjustment persists
	 * across driver reload.
	 */
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
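		/*
		 * Note: "val -= val - snapshot" below reduces to
		 * "val = snapshot", restoring a counter to its bringup
		 * snapshot when an adjustment was still in progress.
		 */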
		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
		}
		if (ppd->cpspec->iblnkdowndelta) {
			val = read_7322_creg32_port(ppd, crp_iblinkdown);
			val += ppd->cpspec->iblnkdowndelta;
			write_7322_creg_port(ppd, crp_iblinkdown, val);
		}
		/*
		 * No need to save ibmalfdelta since IB perfcounters
		 * are cleared on driver reload.
		 */

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
}
/**
 * qib_setup_7322_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs if on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
 *
 * Note:  We try to match the Mellanox HCA LED behavior as best
 * we can.  Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate.  That's
 * visible overhead, so not something we will do.
 */
static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
{
	struct qib_devdata *dd = ppd->dd;
	u64 extctl, ledblink = 0, val;
	unsigned long flags;
	int yel, grn;

	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
	if (dd->diag_client)
		return;

	/* Allow override of LED display for, e.g. Locating system in rack */
	if (ppd->led_override) {
		grn = (ppd->led_override & QIB_LED_PHYS);
		yel = (ppd->led_override & QIB_LED_LOG);
	} else if (on) {
		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		grn = qib_7322_phys_portstate(val) ==
			IB_PHYSPORTSTATE_LINKUP;
		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
	} else {
		grn = 0;
		yel = 0;
	}

	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
	if (grn) {
		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
		/*
		 * Counts are in chip clock (4ns) periods.
		 * This is roughly 1/15 sec (66.6 ms) on,
		 * 3/16 sec (187.5 ms) off, with packets rcvd.
		 */
		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
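		/* on-count: 66600 us * 1000 ns/us / 4 ns/tick = 16,650,000 */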
	}
	if (yel)
		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);

	if (ledblink) /* blink the LED on packet receive */
		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
}
#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
{
	switch (event) {
	case DCA_PROVIDER_ADD:
		if (dd->flags & QIB_DCA_ENABLED)
			break;
		if (!dca_add_requester(&dd->pcidev->dev)) {
			qib_devinfo(dd->pcidev, "DCA enabled\n");
			dd->flags |= QIB_DCA_ENABLED;
			qib_setup_dca(dd);
		}
		break;
	case DCA_PROVIDER_REMOVE:
		if (dd->flags & QIB_DCA_ENABLED) {
			dca_remove_requester(&dd->pcidev->dev);
			dd->flags &= ~QIB_DCA_ENABLED;
			dd->cspec->dca_ctrl = 0;
			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
				dd->cspec->dca_ctrl);
		}
		break;
	}
	return 0;
}
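/*
 * Re-steer a context's RcvHdrq DCA writes to the CPU now servicing its
 * interrupt: patch that context's tag field in the shadowed DCACtrl
 * register, then enable header-queue DCA.
 */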
static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_chip_specific *cspec = dd->cspec;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
		const struct dca_reg_map *rmp;

		cspec->rhdr_cpu[rcd->ctxt] = cpu;
		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
		qib_devinfo(dd->pcidev,
			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		qib_write_kreg(dd, rmp->regno,
			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}

static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_chip_specific *cspec = dd->cspec;
	unsigned pidx = ppd->port - 1;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	if (cspec->sdma_cpu[pidx] != cpu) {
		cspec->sdma_cpu[pidx] = cpu;
		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
		cspec->dca_rcvhdr_ctrl[4] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
				(ppd->hw_pidx ?
					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
		qib_devinfo(dd->pcidev,
			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[4]);
		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
			       cspec->dca_rcvhdr_ctrl[4]);
		cspec->dca_ctrl |= ppd->hw_pidx ?
			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}

static void qib_setup_dca(struct qib_devdata *dd)
{
	struct qib_chip_specific *cspec = dd->cspec;
	int i;

	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
		cspec->rhdr_cpu[i] = -1;
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		cspec->sdma_cpu[i] = -1;
	cspec->dca_rcvhdr_ctrl[0] =
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[1] =
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[2] =
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[3] =
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[4] =
		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
			       cspec->dca_rcvhdr_ctrl[i]);
	for (i = 0; i < cspec->num_msix_entries; i++)
		setup_dca_notifier(dd, &cspec->msix_entries[i]);
}

static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
			     const cpumask_t *mask)
{
	struct qib_irq_notify *n =
		container_of(notify, struct qib_irq_notify, notify);
	int cpu = cpumask_first(mask);

	if (n->rcv) {
		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;

		qib_update_rhdrq_dca(rcd, cpu);
	} else {
		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;

		qib_update_sdma_dca(ppd, cpu);
	}
}

static void qib_irq_notifier_release(struct kref *ref)
{
	struct qib_irq_notify *n =
		container_of(ref, struct qib_irq_notify, notify.kref);
	struct qib_devdata *dd;

	if (n->rcv) {
		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;

		dd = rcd->dd;
	} else {
		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;

		dd = ppd->dd;
	}
	qib_devinfo(dd->pcidev,
		"release on HCA notify 0x%p n 0x%p\n", ref, n);
	kfree(n);
}

#endif
/*
 * Disable MSIx interrupt if enabled, call generic MSIx code
 * to cleanup, and clear pending MSIx interrupts.
 * Used for fallback to INTx, after reset, and when MSIx setup fails.
 */
static void qib_7322_nomsix(struct qib_devdata *dd)
{
	u64 intgranted;
	int n;

	dd->cspec->main_int_mask = ~0ULL;
	n = dd->cspec->num_msix_entries;
	if (n) {
		int i;

		dd->cspec->num_msix_entries = 0;
		for (i = 0; i < n; i++) {
#ifdef CONFIG_INFINIBAND_QIB_DCA
			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
#endif
			irq_set_affinity_hint(
				dd->cspec->msix_entries[i].msix.vector, NULL);
			free_cpumask_var(dd->cspec->msix_entries[i].mask);
			free_irq(dd->cspec->msix_entries[i].msix.vector,
				 dd->cspec->msix_entries[i].arg);
		}
		qib_nomsix(dd);
	}
	/* make sure no MSIx interrupts are left pending */
	intgranted = qib_read_kreg64(dd, kr_intgranted);
	if (intgranted)
		qib_write_kreg(dd, kr_intgranted, intgranted);
}

static void qib_7322_free_irq(struct qib_devdata *dd)
{
	if (dd->cspec->irq) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	qib_7322_nomsix(dd);
}
static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
	int i;

#ifdef CONFIG_INFINIBAND_QIB_DCA
	if (dd->flags & QIB_DCA_ENABLED) {
		dca_remove_requester(&dd->pcidev->dev);
		dd->flags &= ~QIB_DCA_ENABLED;
		dd->cspec->dca_ctrl = 0;
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
	}
#endif

	qib_7322_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->sendchkenable);
	kfree(dd->cspec->sendgrhchk);
	kfree(dd->cspec->sendibchk);
	kfree(dd->cspec->msix_entries);
	for (i = 0; i < dd->num_pports; i++) {
		unsigned long flags;
		u32 mask = QSFP_GPIO_MOD_PRS_N |
			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);

		kfree(dd->pport[i].cpspec->portcntrs);
		if (dd->flags & QIB_HAS_QSFP) {
			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
			dd->cspec->gpio_mask &= ~mask;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
		}
		if (dd->pport[i].ibport_data.smi_ah)
			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
	}
}
/* handle SDMA interrupts */
static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
{
	struct qib_pportdata *ppd0 = &dd->pport[0];
	struct qib_pportdata *ppd1 = &dd->pport[1];
	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));

	if (intr0)
		qib_sdma_intr(ppd0);
	if (intr1)
		qib_sdma_intr(ppd1);

	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
}
/*
 * Set or clear the Send buffer available interrupt enable bit.
 */
static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (needint)
		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
	else
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}
/*
 * Somehow got an interrupt with reserved bits set in interrupt status.
 * Print a message so we know it happened, then clear them.
 * keep mainline interrupt handler cache-friendly
 */
static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
{
	u64 kills;

	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd,
		"Clearing reserved interrupt(s) 0x%016llx\n",
		(unsigned long long) kills);
	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
}
/* keep mainline interrupt handler cache-friendly */
static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
{
	u32 gpiostatus;
	int handled = 0;
	int pidx;

	/*
	 * Boards for this chip currently don't use GPIO interrupts,
	 * so clear by writing GPIOstatus to GPIOclear, and complain
	 * to developer.  To avoid endless repeats, clear
	 * the bits in the mask, since there is some kind of
	 * programming error or chip problem.
	 */
	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
	/*
	 * In theory, writing GPIOstatus to GPIOclear could
	 * have a bad side-effect on some diagnostic that wanted
	 * to poll for a status-change, but the various shadows
	 * make that problematic at best. Diags will just suppress
	 * all GPIO interrupts during such tests.
	 */
	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
	/*
	 * Check for QSFP MOD_PRS changes
	 * only works for single port if IB1 != pidx1
	 */
	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
	     ++pidx) {
		struct qib_pportdata *ppd;
		struct qib_qsfp_data *qd;
		u32 mask;

		if (!dd->pport[pidx].link_speed_supported)
			continue;
		mask = QSFP_GPIO_MOD_PRS_N;
		ppd = dd->pport + pidx;
		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
		if (gpiostatus & dd->cspec->gpio_mask & mask) {
			u64 pins;

			qd = &ppd->cpspec->qsfp_data;
			gpiostatus &= ~mask;
			pins = qib_read_kreg64(dd, kr_extstatus);
			pins >>= SYM_LSB(EXTStatus, GPIOIn);
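			/* MOD_PRS_N is active-low: pin low = module present */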
			if (!(pins & mask)) {
				++handled;
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
		}
	}

	if (gpiostatus && !handled) {
		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
		u32 gpio_irq = mask & gpiostatus;

		/*
		 * Clear any troublemakers, and update chip from shadow
		 */
		dd->cspec->gpio_mask &= ~gpio_irq;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	}
}
/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
{
	if (istat & ~QIB_I_BITSEXTANT)
		unknown_7322_ibits(dd, istat);
	if (istat & QIB_I_GPIO)
		unknown_7322_gpio_intr(dd);
	if (istat & QIB_I_C_ERROR) {
		qib_write_kreg(dd, kr_errmask, 0ULL);
		tasklet_schedule(&dd->error_tasklet);
	}
	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
		handle_7322_p_errors(dd->rcd[0]->ppd);
	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
		handle_7322_p_errors(dd->rcd[1]->ppd);
}
/*
 * Dynamically adjust the rcv int timeout for a context based on incoming
 * packet rate.
 */
static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
{
	struct qib_devdata *dd = rcd->dd;
	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];

	/*
	 * Dynamically adjust idle timeout on chip
	 * based on number of packets processed.
	 */
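	/*
	 * e.g., fewer than rcv_int_count packets this interrupt halves
	 * the timeout (floor of 2); rcv_int_count or more doubles it,
	 * capped at rcv_int_timeout.
	 */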
	if (npkts < rcv_int_count && timeout > 2)
		timeout >>= 1;
	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
		timeout = min(timeout << 1, rcv_int_timeout);
	else
		return;

	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
}
/*
 * This is the main interrupt handler.
 * It will normally only be used for low frequency interrupts but may
 * have to handle all interrupts if INTx is enabled or fewer than normal
 * MSIx interrupts were allocated.
 * This routine should ignore the interrupt bits for any of the
 * dedicated MSIx handlers.
 */
static irqreturn_t qib_7322intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u64 istat;
	u64 ctxtrbits;
	u64 rmask;
	unsigned i;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg64(dd, kr_intstatus);

	if (unlikely(istat == ~0ULL)) {
		qib_bad_intrstatus(dd);
		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	istat &= dd->cspec->main_int_mask;
	if (unlikely(!istat)) {
		/* already handled, or shared and not us */
		ret = IRQ_NONE;
		goto bail;
	}

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* handle "errors" of various kinds first, device ahead of port */
	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
			      INT_MASK_P(Err, 1))))
		unlikely_7322_intr(dd, istat);

	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary.  The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
	if (ctxtrbits) {
		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
			(1ULL << QIB_I_RCVURG_LSB);
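		/*
		 * rmask starts as the RcvAvail and RcvUrg bits for ctxt 0;
		 * one shift per context walks both bits across the kernel
		 * contexts together.
		 */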
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				if (dd->rcd[i])
					qib_kreceive(dd->rcd[i], NULL, &npkts);
			}
			rmask <<= 1;
		}
		if (ctxtrbits) {
			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
				(ctxtrbits >> QIB_I_RCVURG_LSB);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
		sdma_7322_intr(dd, istat);

	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}
/*
 * Dedicated receive packet available interrupt handler.
 */
static irqreturn_t qib_7322pintr(int irq, void *data)
{
	struct qib_ctxtdata *rcd = data;
	struct qib_devdata *dd = rcd->dd;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);

	qib_kreceive(rcd, NULL, &npkts);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send buffer available interrupt handler.
 */
static irqreturn_t qib_7322bufavail(int irq, void *data)
{
	struct qib_devdata *dd = data;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);

	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
	if (dd->flags & QIB_INITTED)
		qib_ib_piobufavail(dd);
	else
		qib_wantpiobuf_7322_intr(dd, 0);

	return IRQ_HANDLED;
}
/*
 * Dedicated Send DMA interrupt handler.
 */
static irqreturn_t sdma_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA idle interrupt handler.
 */
static irqreturn_t sdma_idle_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA progress interrupt handler.
 */
static irqreturn_t sdma_progress_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaProgress, 1) :
		       INT_MASK_P(SDmaProgress, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA cleanup interrupt handler.
 */
static irqreturn_t sdma_cleanup_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_PM(SDmaCleanupDone, 1) :
		       INT_MASK_PM(SDmaCleanupDone, 0));
	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);

	return IRQ_HANDLED;
}
#ifdef CONFIG_INFINIBAND_QIB_DCA

static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
{
	if (!m->dca)
		return;
	qib_devinfo(dd->pcidev,
		"Disabling notifier on HCA %d irq %d\n",
		dd->unit,
		m->msix.vector);
	irq_set_affinity_notifier(
		m->msix.vector,
		NULL);
	m->notifier = NULL;
}

static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
{
	struct qib_irq_notify *n;

	if (!m->dca)
		return;
	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (n) {
		int ret;

		m->notifier = n;
		n->notify.irq = m->msix.vector;
		n->notify.notify = qib_irq_notifier_notify;
		n->notify.release = qib_irq_notifier_release;
		n->arg = m->arg;
		n->rcv = m->rcv;
		qib_devinfo(dd->pcidev,
			"set notifier irq %d rcv %d notify %p\n",
			n->notify.irq, n->rcv, &n->notify);
		ret = irq_set_affinity_notifier(
				n->notify.irq,
				&n->notify);
		if (ret) {
			m->notifier = NULL;
			kfree(n);
		}
	}
}

#endif
/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 * If we are using MSIx interrupts, we may fall back to
 * INTx later, if the interrupt handler doesn't get called
 * within 1/2 second (see verify_interrupt()).
 */
static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
{
	int ret, i, msixnum;
	u64 redirect[6];
	u64 mask;
	const struct cpumask *local_mask;
	int firstcpu, secondcpu = 0, currrcvcpu = 0;

	if (!dd->num_pports)
		return;

	if (clearpend) {
		/*
		 * if not switching interrupt types, be sure interrupts are
		 * disabled, and then clear anything pending at this point,
		 * because we are starting clean.
		 */
		qib_7322_set_intr_state(dd, 0);

		/* clear the reset error, init error/hwerror mask */
		qib_7322_init_hwerrors(dd);

		/* clear any interrupt bits that might be set */
		qib_write_kreg(dd, kr_intclear, ~0ULL);

		/* make sure no pending MSIx intr, and clear diag reg */
		qib_write_kreg(dd, kr_intgranted, ~0ULL);
		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
	}

	if (!dd->cspec->num_msix_entries) {
		/* Try to get INTx interrupt */
try_intx:
		if (!dd->pcidev->irq) {
			qib_dev_err(dd,
				"irq is 0, BIOS error?  Interrupts won't work\n");
			goto bail;
		}
		ret = request_irq(dd->pcidev->irq, qib_7322intr,
				  IRQF_SHARED, QIB_DRV_NAME, dd);
		if (ret) {
			qib_dev_err(dd,
				"Couldn't setup INTx interrupt (irq=%d): %d\n",
				dd->pcidev->irq, ret);
			goto bail;
		}
		dd->cspec->irq = dd->pcidev->irq;
		dd->cspec->main_int_mask = ~0ULL;
		goto bail;
	}

	/* Try to get MSIx interrupts */
	memset(redirect, 0, sizeof redirect);
	mask = ~0ULL;
	msixnum = 0;
	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	firstcpu = cpumask_first(local_mask);
	if (firstcpu >= nr_cpu_ids ||
	    cpumask_weight(local_mask) == num_online_cpus()) {
		local_mask = topology_core_cpumask(0);
		firstcpu = cpumask_first(local_mask);
	}
	if (firstcpu < nr_cpu_ids) {
		secondcpu = cpumask_next(firstcpu, local_mask);
		if (secondcpu >= nr_cpu_ids)
			secondcpu = firstcpu;
		currrcvcpu = secondcpu;
	}
	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
		irq_handler_t handler;
		void *arg;
		u64 val;
		int lsb, reg, sh;
#ifdef CONFIG_INFINIBAND_QIB_DCA
		int dca = 0;
#endif

		dd->cspec->msix_entries[msixnum].
			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
			= '\0';
		if (i < ARRAY_SIZE(irq_table)) {
			if (irq_table[i].port) {
				/* skip if for a non-configured port */
				if (irq_table[i].port > dd->num_pports)
					continue;
				arg = dd->pport + irq_table[i].port - 1;
			} else
				arg = dd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
			dca = irq_table[i].dca;
#endif
			lsb = irq_table[i].lsb;
			handler = irq_table[i].handler;
			snprintf(dd->cspec->msix_entries[msixnum].name,
				sizeof(dd->cspec->msix_entries[msixnum].name)
				 - 1,
				QIB_DRV_NAME "%d%s", dd->unit,
				irq_table[i].name);
		} else {
			unsigned ctxt;

			ctxt = i - ARRAY_SIZE(irq_table);
			/* per krcvq context receive interrupt */
			arg = dd->rcd[ctxt];
			if (!arg)
				continue;
			if (qib_krcvq01_no_msi && ctxt < 2)
				continue;
#ifdef CONFIG_INFINIBAND_QIB_DCA
			dca = 1;
#endif
			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
			handler = qib_7322pintr;
			snprintf(dd->cspec->msix_entries[msixnum].name,
				sizeof(dd->cspec->msix_entries[msixnum].name)
				 - 1,
				QIB_DRV_NAME "%d (kctx)", dd->unit);
		}
		ret = request_irq(
			dd->cspec->msix_entries[msixnum].msix.vector,
			handler, 0, dd->cspec->msix_entries[msixnum].name,
			arg);
		if (ret) {
			/*
			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to setup here.
			 */
			qib_dev_err(dd,
				"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
				msixnum,
				dd->cspec->msix_entries[msixnum].msix.vector,
				ret);
			qib_7322_nomsix(dd);
			goto try_intx;
		}
		dd->cspec->msix_entries[msixnum].arg = arg;
#ifdef CONFIG_INFINIBAND_QIB_DCA
		dd->cspec->msix_entries[msixnum].dca = dca;
		dd->cspec->msix_entries[msixnum].rcv =
			handler == qib_7322pintr;
#endif
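		/*
		 * Point the chip's redirect table at this vector: lsb picks
		 * the IntRedirect register and field for this source, and
		 * the source bit is removed from the general handler's mask.
		 */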
		if (lsb >= 0) {
			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
				SYM_LSB(IntRedirect0, vec1);
			mask &= ~(1ULL << lsb);
			redirect[reg] |= ((u64) msixnum) << sh;
		}
		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
		if (firstcpu < nr_cpu_ids &&
			zalloc_cpumask_var(
				&dd->cspec->msix_entries[msixnum].mask,
				GFP_KERNEL)) {
			if (handler == qib_7322pintr) {
				cpumask_set_cpu(currrcvcpu,
					dd->cspec->msix_entries[msixnum].mask);
				currrcvcpu = cpumask_next(currrcvcpu,
					local_mask);
				if (currrcvcpu >= nr_cpu_ids)
					currrcvcpu = secondcpu;
			} else {
				cpumask_set_cpu(firstcpu,
					dd->cspec->msix_entries[msixnum].mask);
			}
			irq_set_affinity_hint(
				dd->cspec->msix_entries[msixnum].msix.vector,
				dd->cspec->msix_entries[msixnum].mask);
		}
		msixnum++;
	}
	/* Initialize the vector mapping */
	for (i = 0; i < ARRAY_SIZE(redirect); i++)
		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
	dd->cspec->main_int_mask = mask;
	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
		(unsigned long)dd);
bail:;
}
/**
 * qib_7322_boardname - fill in the board name and note features
 * @dd: the qlogic_ib device
 *
 * info will be based on the board revision register
 */
static unsigned qib_7322_boardname(struct qib_devdata *dd)
{
	/* Will need enumeration of board-types here */
	char *n;
	u32 boardid, namelen;
	unsigned features = DUAL_PORT_CAP;

	boardid = SYM_FIELD(dd->revision, Revision, BoardID);

	switch (boardid) {
	case 0:
		n = "InfiniPath_QLE7342_Emulation";
		break;
	case 1:
		n = "InfiniPath_QLE7340";
		dd->flags |= QIB_HAS_QSFP;
		features = PORT_SPD_CAP;
		break;
	case 2:
		n = "InfiniPath_QLE7342";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 3:
		n = "InfiniPath_QMI7342";
		break;
	case 4:
		n = "InfiniPath_Unsupported7342";
		qib_dev_err(dd, "Unsupported version of QMH7342\n");
		features = 0;
		break;
	case BOARD_QMH7342:
		n = "InfiniPath_QMH7342";
		features = 0x24;
		break;
	case BOARD_QME7342:
		n = "InfiniPath_QME7342";
		break;
	case 8:
		n = "InfiniPath_QME7362";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 15:
		n = "InfiniPath_QLE7342_TEST";
		dd->flags |= QIB_HAS_QSFP;
		break;
	default:
		n = "InfiniPath_QLE73xy_UNKNOWN";
		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
		break;
	}
	dd->board_atten = 1; /* index into txdds_Xdr */

	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));

	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
		qib_devinfo(dd->pcidev,
			"IB%u: Forced to single port mode by module parameter\n",
			dd->unit);
		features &= PORT_SPD_CAP;
	}

	return features;
}
  3274. /*
  3275. * This routine sleeps, so it can only be called from user context, not
  3276. * from interrupt context.
  3277. */
  3278. static int qib_do_7322_reset(struct qib_devdata *dd)
  3279. {
  3280. u64 val;
  3281. u64 *msix_vecsave;
  3282. int i, msix_entries, ret = 1;
  3283. u16 cmdval;
  3284. u8 int_line, clinesz;
  3285. unsigned long flags;
  3286. /* Use dev_err so it shows up in logs, etc. */
  3287. qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
  3288. qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
  3289. msix_entries = dd->cspec->num_msix_entries;
  3290. /* no interrupts till re-initted */
  3291. qib_7322_set_intr_state(dd, 0);
  3292. if (msix_entries) {
  3293. qib_7322_nomsix(dd);
  3294. /* can be up to 512 bytes, too big for stack */
  3295. msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
  3296. sizeof(u64), GFP_KERNEL);
  3297. if (!msix_vecsave)
  3298. qib_dev_err(dd, "No mem to save MSIx data\n");
  3299. } else
  3300. msix_vecsave = NULL;
  3301. /*
  3302. * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
  3303. * info that is set up by the BIOS, so we have to save and restore
  3304. * it ourselves. There is some risk something could change it,
  3305. * after we save it, but since we have disabled the MSIx, it
  3306. * shouldn't be touched...
  3307. */
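	/*
	 * Editor's sketch of the layout assumed by the loop below
	 * (derived from the index arithmetic): MSI-X table entry i
	 * occupies two consecutive 64-bit kregs starting at
	 * QIB_7322_MsixTable_OFFS / sizeof(u64):
	 *
	 *	kreg[base + 2 * i]	message address
	 *	kreg[base + 2 * i + 1]	message data; bit 32 is the
	 *				per-vector mask bit, hence the
	 *				~0x100000000ULL below
	 */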
  3308. for (i = 0; i < msix_entries; i++) {
  3309. u64 vecaddr, vecdata;
  3310. vecaddr = qib_read_kreg64(dd, 2 * i +
  3311. (QIB_7322_MsixTable_OFFS / sizeof(u64)));
  3312. vecdata = qib_read_kreg64(dd, 1 + 2 * i +
  3313. (QIB_7322_MsixTable_OFFS / sizeof(u64)));
  3314. if (msix_vecsave) {
  3315. msix_vecsave[2 * i] = vecaddr;
  3316. /* save it without the masked bit set */
  3317. msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
  3318. }
  3319. }
  3320. dd->pport->cpspec->ibdeltainprog = 0;
  3321. dd->pport->cpspec->ibsymdelta = 0;
  3322. dd->pport->cpspec->iblnkerrdelta = 0;
  3323. dd->pport->cpspec->ibmalfdelta = 0;
  3324. dd->int_counter = 0; /* so we check interrupts work again */
  3325. /*
  3326. * Keep chip from being accessed until we are ready. Use
  3327. * writeq() directly, to allow the write even though QIB_PRESENT
  3328. * isn't set.
  3329. */
  3330. dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
  3331. dd->flags |= QIB_DOING_RESET;
  3332. val = dd->control | QLOGIC_IB_C_RESET;
  3333. writeq(val, &dd->kregbase[kr_control]);
  3334. for (i = 1; i <= 5; i++) {
  3335. /*
  3336. * Allow MBIST, etc. to complete; longer on each retry.
  3337. * We sometimes get machine checks from bus timeout if no
  3338. * response, so for now, make it *really* long.
  3339. */
  3340. msleep(1000 + (1 + i) * 3000);
  3341. qib_pcie_reenable(dd, cmdval, int_line, clinesz);
  3342. /*
  3343. * Use readq directly, so we don't need to mark it as PRESENT
  3344. * until we get a successful indication that all is well.
  3345. */
  3346. val = readq(&dd->kregbase[kr_revision]);
  3347. if (val == dd->revision)
  3348. break;
  3349. if (i == 5) {
  3350. qib_dev_err(dd,
  3351. "Failed to initialize after reset, unusable\n");
  3352. ret = 0;
  3353. goto bail;
  3354. }
  3355. }
  3356. dd->flags |= QIB_PRESENT; /* it's back */
  3357. if (msix_entries) {
  3358. /* restore the MSIx vector address and data if saved above */
  3359. for (i = 0; i < msix_entries; i++) {
  3360. dd->cspec->msix_entries[i].msix.entry = i;
  3361. if (!msix_vecsave || !msix_vecsave[2 * i])
  3362. continue;
  3363. qib_write_kreg(dd, 2 * i +
  3364. (QIB_7322_MsixTable_OFFS / sizeof(u64)),
  3365. msix_vecsave[2 * i]);
  3366. qib_write_kreg(dd, 1 + 2 * i +
  3367. (QIB_7322_MsixTable_OFFS / sizeof(u64)),
  3368. msix_vecsave[1 + 2 * i]);
  3369. }
  3370. }
  3371. /* initialize the remaining registers. */
  3372. for (i = 0; i < dd->num_pports; ++i)
  3373. write_7322_init_portregs(&dd->pport[i]);
  3374. write_7322_initregs(dd);
  3375. if (qib_pcie_params(dd, dd->lbus_width,
  3376. &dd->cspec->num_msix_entries,
  3377. dd->cspec->msix_entries))
  3378. qib_dev_err(dd,
  3379. "Reset failed to setup PCIe or interrupts; continuing anyway\n");
  3380. qib_setup_7322_interrupt(dd, 1);
  3381. for (i = 0; i < dd->num_pports; ++i) {
  3382. struct qib_pportdata *ppd = &dd->pport[i];
  3383. spin_lock_irqsave(&ppd->lflags_lock, flags);
  3384. ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
  3385. ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
  3386. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  3387. }
  3388. bail:
  3389. dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
  3390. kfree(msix_vecsave);
  3391. return ret;
  3392. }
/**
 * qib_7322_put_tid - write a TID to the chip
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: RCVHQ_RCV_TYPE_EAGER for eager, RCVHQ_RCV_TYPE_EXPECTED for expected
 * @pa: physical address of in-memory buffer; tidinvalid if freeing
 */
  3400. static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
  3401. u32 type, unsigned long pa)
  3402. {
  3403. if (!(dd->flags & QIB_PRESENT))
  3404. return;
  3405. if (pa != dd->tidinvalid) {
  3406. u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
  3407. /* paranoia checks */
  3408. if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
  3409. qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
  3410. pa);
  3411. return;
  3412. }
  3413. if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
  3414. qib_dev_err(dd,
  3415. "Physical page address 0x%lx larger than supported\n",
  3416. pa);
  3417. return;
  3418. }
  3419. if (type == RCVHQ_RCV_TYPE_EAGER)
  3420. chippa |= dd->tidtemplate;
  3421. else /* for now, always full 4KB page */
  3422. chippa |= IBA7322_TID_SZ_4K;
  3423. pa = chippa;
  3424. }
  3425. writeq(pa, tidptr);
  3426. mmiowb();
  3427. }
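/*
 * Usage sketch (editor's illustration, mirroring qib_7322_clear_tids()
 * below): an entry is freed by passing the invalid value instead of a
 * buffer address, e.g.
 *
 *	qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
 *			 dd->tidinvalid);
 */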
/**
 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @rcd: the ctxt data
 *
 * Clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close().
 */
  3436. static void qib_7322_clear_tids(struct qib_devdata *dd,
  3437. struct qib_ctxtdata *rcd)
  3438. {
  3439. u64 __iomem *tidbase;
  3440. unsigned long tidinv;
  3441. u32 ctxt;
  3442. int i;
  3443. if (!dd->kregbase || !rcd)
  3444. return;
  3445. ctxt = rcd->ctxt;
  3446. tidinv = dd->tidinvalid;
  3447. tidbase = (u64 __iomem *)
  3448. ((char __iomem *) dd->kregbase +
  3449. dd->rcvtidbase +
  3450. ctxt * dd->rcvtidcnt * sizeof(*tidbase));
  3451. for (i = 0; i < dd->rcvtidcnt; i++)
  3452. qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
  3453. tidinv);
  3454. tidbase = (u64 __iomem *)
  3455. ((char __iomem *) dd->kregbase +
  3456. dd->rcvegrbase +
  3457. rcd->rcvegr_tid_base * sizeof(*tidbase));
  3458. for (i = 0; i < rcd->rcvegrcnt; i++)
  3459. qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
  3460. tidinv);
  3461. }
/**
 * qib_7322_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We set up values that we use a lot, to avoid recalculating each time.
 */
  3468. static void qib_7322_tidtemplate(struct qib_devdata *dd)
  3469. {
  3470. /*
  3471. * For now, we always allocate 4KB buffers (at init) so we can
  3472. * receive max size packets. We may want a module parameter to
  3473. * specify 2KB or 4KB and/or make it per port instead of per device
  3474. * for those who want to reduce memory footprint. Note that the
  3475. * rcvhdrentsize size must be large enough to hold the largest
  3476. * IB header (currently 96 bytes) that we expect to handle (plus of
  3477. * course the 2 dwords of RHF).
  3478. */
  3479. if (dd->rcvegrbufsize == 2048)
  3480. dd->tidtemplate = IBA7322_TID_SZ_2K;
  3481. else if (dd->rcvegrbufsize == 4096)
  3482. dd->tidtemplate = IBA7322_TID_SZ_4K;
  3483. dd->tidinvalid = 0;
  3484. }
/**
 * qib_7322_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
  3493. static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
  3494. struct qib_base_info *kinfo)
  3495. {
  3496. kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
  3497. QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
  3498. QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
  3499. if (rcd->dd->cspec->r1)
  3500. kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
  3501. if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
  3502. kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
  3503. return 0;
  3504. }
  3505. static struct qib_message_header *
  3506. qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
  3507. {
  3508. u32 offset = qib_hdrget_offset(rhf_addr);
  3509. return (struct qib_message_header *)
  3510. (rhf_addr - dd->rhf_offset + offset);
  3511. }
  3512. /*
  3513. * Configure number of contexts.
  3514. */
  3515. static void qib_7322_config_ctxts(struct qib_devdata *dd)
  3516. {
  3517. unsigned long flags;
  3518. u32 nchipctxts;
  3519. nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
  3520. dd->cspec->numctxts = nchipctxts;
  3521. if (qib_n_krcv_queues > 1 && dd->num_pports) {
  3522. dd->first_user_ctxt = NUM_IB_PORTS +
  3523. (qib_n_krcv_queues - 1) * dd->num_pports;
  3524. if (dd->first_user_ctxt > nchipctxts)
  3525. dd->first_user_ctxt = nchipctxts;
  3526. dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
  3527. } else {
  3528. dd->first_user_ctxt = NUM_IB_PORTS;
  3529. dd->n_krcv_queues = 1;
  3530. }
  3531. if (!qib_cfgctxts) {
  3532. int nctxts = dd->first_user_ctxt + num_online_cpus();
  3533. if (nctxts <= 6)
  3534. dd->ctxtcnt = 6;
  3535. else if (nctxts <= 10)
  3536. dd->ctxtcnt = 10;
  3537. else if (nctxts <= nchipctxts)
  3538. dd->ctxtcnt = nchipctxts;
  3539. } else if (qib_cfgctxts < dd->num_pports)
  3540. dd->ctxtcnt = dd->num_pports;
  3541. else if (qib_cfgctxts <= nchipctxts)
  3542. dd->ctxtcnt = qib_cfgctxts;
  3543. if (!dd->ctxtcnt) /* none of the above, set to max */
  3544. dd->ctxtcnt = nchipctxts;
  3545. /*
  3546. * Chip can be configured for 6, 10, or 18 ctxts, and choice
  3547. * affects number of eager TIDs per ctxt (1K, 2K, 4K).
  3548. * Lock to be paranoid about later motion, etc.
  3549. */
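	/*
	 * Editor's note on the encoding below: ContextCfg is 0 for the
	 * default 6 contexts, 1 for 10, and 2 for 18, so e.g. a
	 * ctxtcnt of 16 selects the 18-context configuration.
	 */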
  3550. spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
  3551. if (dd->ctxtcnt > 10)
  3552. dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
  3553. else if (dd->ctxtcnt > 6)
  3554. dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
  3555. /* else configure for default 6 receive ctxts */
  3556. /* The XRC opcode is 5. */
  3557. dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
  3558. /*
  3559. * RcvCtrl *must* be written here so that the
  3560. * chip understands how to change rcvegrcnt below.
  3561. */
  3562. qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
  3563. spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
  3564. /* kr_rcvegrcnt changes based on the number of contexts enabled */
  3565. dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
  3566. if (qib_rcvhdrcnt)
  3567. dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
  3568. else
  3569. dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
  3570. dd->num_pports > 1 ? 1024U : 2048U);
  3571. }
  3572. static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
  3573. {
  3574. int lsb, ret = 0;
  3575. u64 maskr; /* right-justified mask */
  3576. switch (which) {
  3577. case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
  3578. ret = ppd->link_width_enabled;
  3579. goto done;
  3580. case QIB_IB_CFG_LWID: /* Get currently active Link-width */
  3581. ret = ppd->link_width_active;
  3582. goto done;
  3583. case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
  3584. ret = ppd->link_speed_enabled;
  3585. goto done;
  3586. case QIB_IB_CFG_SPD: /* Get current Link spd */
  3587. ret = ppd->link_speed_active;
  3588. goto done;
  3589. case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
  3590. lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
  3591. maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
  3592. break;
  3593. case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
  3594. lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
  3595. maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
  3596. break;
  3597. case QIB_IB_CFG_LINKLATENCY:
  3598. ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
  3599. SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
  3600. goto done;
  3601. case QIB_IB_CFG_OP_VLS:
  3602. ret = ppd->vls_operational;
  3603. goto done;
  3604. case QIB_IB_CFG_VL_HIGH_CAP:
  3605. ret = 16;
  3606. goto done;
  3607. case QIB_IB_CFG_VL_LOW_CAP:
  3608. ret = 16;
  3609. goto done;
  3610. case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
  3611. ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
  3612. OverrunThreshold);
  3613. goto done;
  3614. case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
  3615. ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
  3616. PhyerrThreshold);
  3617. goto done;
  3618. case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
  3619. /* will only take effect when the link state changes */
  3620. ret = (ppd->cpspec->ibcctrl_a &
  3621. SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
  3622. IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
  3623. goto done;
  3624. case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
  3625. lsb = IBA7322_IBC_HRTBT_LSB;
  3626. maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
  3627. break;
  3628. case QIB_IB_CFG_PMA_TICKS:
  3629. /*
  3630. * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
  3631. * Since the clock is always 250MHz, the value is 3, 1 or 0.
  3632. */
  3633. if (ppd->link_speed_active == QIB_IB_QDR)
  3634. ret = 3;
  3635. else if (ppd->link_speed_active == QIB_IB_DDR)
  3636. ret = 1;
  3637. else
  3638. ret = 0;
  3639. goto done;
  3640. default:
  3641. ret = -EINVAL;
  3642. goto done;
  3643. }
  3644. ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
  3645. done:
  3646. return ret;
  3647. }
  3648. /*
  3649. * Below again cribbed liberally from older version. Do not lean
  3650. * heavily on it.
  3651. */
  3652. #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
  3653. #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
  3654. | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
  3655. static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
  3656. {
  3657. struct qib_devdata *dd = ppd->dd;
  3658. u64 maskr; /* right-justified mask */
  3659. int lsb, ret = 0;
  3660. u16 lcmd, licmd;
  3661. unsigned long flags;
  3662. switch (which) {
  3663. case QIB_IB_CFG_LIDLMC:
		/*
		 * Set LID and LMC. Combined to avoid possible hazard;
		 * the caller puts LMC in the 16 MSbits, DLID in the
		 * 16 LSbits of val.
		 */
  3668. lsb = IBA7322_IBC_DLIDLMC_SHIFT;
  3669. maskr = IBA7322_IBC_DLIDLMC_MASK;
  3670. /*
  3671. * For header-checking, the SLID in the packet will
  3672. * be masked with SendIBSLMCMask, and compared
  3673. * with SendIBSLIDAssignMask. Make sure we do not
  3674. * set any bits not covered by the mask, or we get
  3675. * false-positives.
  3676. */
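		/*
		 * Editor's example (hypothetical values) of the packing
		 * described above: for DLID 0x14 with an LMC mask of
		 * 0xfffc, the caller passes val = (0xfffc << 16) | 0x14,
		 * so the SLID compare value written below is
		 * 0x14 & 0xfffc and the SLID mask is 0xfffc (both
		 * further limited by the chip's field masks).
		 */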
  3677. qib_write_kreg_port(ppd, krp_sendslid,
  3678. val & (val >> 16) & SendIBSLIDAssignMask);
  3679. qib_write_kreg_port(ppd, krp_sendslidmask,
  3680. (val >> 16) & SendIBSLMCMask);
  3681. break;
  3682. case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
  3683. ppd->link_width_enabled = val;
  3684. /* convert IB value to chip register value */
  3685. if (val == IB_WIDTH_1X)
  3686. val = 0;
  3687. else if (val == IB_WIDTH_4X)
  3688. val = 1;
  3689. else
  3690. val = 3;
  3691. maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
  3692. lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
  3693. break;
  3694. case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
  3695. /*
  3696. * As with width, only write the actual register if the
  3697. * link is currently down, otherwise takes effect on next
  3698. * link change. Since setting is being explicitly requested
  3699. * (via MAD or sysfs), clear autoneg failure status if speed
  3700. * autoneg is enabled.
  3701. */
  3702. ppd->link_speed_enabled = val;
  3703. val <<= IBA7322_IBC_SPEED_LSB;
  3704. maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
  3705. IBA7322_IBC_MAX_SPEED_MASK;
  3706. if (val & (val - 1)) {
			/* Multiple speeds enabled */
  3708. val |= IBA7322_IBC_IBTA_1_2_MASK |
  3709. IBA7322_IBC_MAX_SPEED_MASK;
  3710. spin_lock_irqsave(&ppd->lflags_lock, flags);
  3711. ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
  3712. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  3713. } else if (val & IBA7322_IBC_SPEED_QDR)
  3714. val |= IBA7322_IBC_IBTA_1_2_MASK;
  3715. /* IBTA 1.2 mode + min/max + speed bits are contiguous */
  3716. lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
  3717. break;
  3718. case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
  3719. lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
  3720. maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
  3721. break;
  3722. case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
  3723. lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
  3724. maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
  3725. break;
  3726. case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
  3727. maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
  3728. OverrunThreshold);
  3729. if (maskr != val) {
  3730. ppd->cpspec->ibcctrl_a &=
  3731. ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
  3732. ppd->cpspec->ibcctrl_a |= (u64) val <<
  3733. SYM_LSB(IBCCtrlA_0, OverrunThreshold);
  3734. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  3735. ppd->cpspec->ibcctrl_a);
  3736. qib_write_kreg(dd, kr_scratch, 0ULL);
  3737. }
  3738. goto bail;
  3739. case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
  3740. maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
  3741. PhyerrThreshold);
  3742. if (maskr != val) {
  3743. ppd->cpspec->ibcctrl_a &=
  3744. ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
  3745. ppd->cpspec->ibcctrl_a |= (u64) val <<
  3746. SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
  3747. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  3748. ppd->cpspec->ibcctrl_a);
  3749. qib_write_kreg(dd, kr_scratch, 0ULL);
  3750. }
  3751. goto bail;
  3752. case QIB_IB_CFG_PKEYS: /* update pkeys */
  3753. maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
  3754. ((u64) ppd->pkeys[2] << 32) |
  3755. ((u64) ppd->pkeys[3] << 48);
  3756. qib_write_kreg_port(ppd, krp_partitionkey, maskr);
  3757. goto bail;
  3758. case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
  3759. /* will only take effect when the link state changes */
  3760. if (val == IB_LINKINITCMD_POLL)
  3761. ppd->cpspec->ibcctrl_a &=
  3762. ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
  3763. else /* SLEEP */
  3764. ppd->cpspec->ibcctrl_a |=
  3765. SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
  3766. qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
  3767. qib_write_kreg(dd, kr_scratch, 0ULL);
  3768. goto bail;
  3769. case QIB_IB_CFG_MTU: /* update the MTU in IBC */
		/*
		 * Update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
		 * Set it even if unchanged; print a debug message only
		 * on changes.
		 */
  3777. val = (ppd->ibmaxlen >> 2) + 1;
  3778. ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
  3779. ppd->cpspec->ibcctrl_a |= (u64)val <<
  3780. SYM_LSB(IBCCtrlA_0, MaxPktLen);
  3781. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  3782. ppd->cpspec->ibcctrl_a);
  3783. qib_write_kreg(dd, kr_scratch, 0ULL);
  3784. goto bail;
  3785. case QIB_IB_CFG_LSTATE: /* set the IB link state */
  3786. switch (val & 0xffff0000) {
  3787. case IB_LINKCMD_DOWN:
  3788. lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
  3789. ppd->cpspec->ibmalfusesnap = 1;
  3790. ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
  3791. crp_errlink);
  3792. if (!ppd->cpspec->ibdeltainprog &&
  3793. qib_compat_ddr_negotiate) {
  3794. ppd->cpspec->ibdeltainprog = 1;
  3795. ppd->cpspec->ibsymsnap =
  3796. read_7322_creg32_port(ppd,
  3797. crp_ibsymbolerr);
  3798. ppd->cpspec->iblnkerrsnap =
  3799. read_7322_creg32_port(ppd,
  3800. crp_iblinkerrrecov);
  3801. }
  3802. break;
  3803. case IB_LINKCMD_ARMED:
  3804. lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
  3805. if (ppd->cpspec->ibmalfusesnap) {
  3806. ppd->cpspec->ibmalfusesnap = 0;
  3807. ppd->cpspec->ibmalfdelta +=
  3808. read_7322_creg32_port(ppd,
  3809. crp_errlink) -
  3810. ppd->cpspec->ibmalfsnap;
  3811. }
  3812. break;
  3813. case IB_LINKCMD_ACTIVE:
  3814. lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
  3815. break;
  3816. default:
  3817. ret = -EINVAL;
  3818. qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
  3819. goto bail;
  3820. }
  3821. switch (val & 0xffff) {
  3822. case IB_LINKINITCMD_NOP:
  3823. licmd = 0;
  3824. break;
  3825. case IB_LINKINITCMD_POLL:
  3826. licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
  3827. break;
  3828. case IB_LINKINITCMD_SLEEP:
  3829. licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
  3830. break;
  3831. case IB_LINKINITCMD_DISABLE:
  3832. licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
  3833. ppd->cpspec->chase_end = 0;
			/*
			 * Stop state chase counter and timer, if running.
			 * Wait for the pending timer, but don't clear
			 * .data (ppd)!
			 */
  3838. if (ppd->cpspec->chase_timer.expires) {
  3839. del_timer_sync(&ppd->cpspec->chase_timer);
  3840. ppd->cpspec->chase_timer.expires = 0;
  3841. }
  3842. break;
  3843. default:
  3844. ret = -EINVAL;
  3845. qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
  3846. val & 0xffff);
  3847. goto bail;
  3848. }
  3849. qib_set_ib_7322_lstate(ppd, lcmd, licmd);
  3850. goto bail;
  3851. case QIB_IB_CFG_OP_VLS:
  3852. if (ppd->vls_operational != val) {
  3853. ppd->vls_operational = val;
  3854. set_vls(ppd);
  3855. }
  3856. goto bail;
  3857. case QIB_IB_CFG_VL_HIGH_LIMIT:
  3858. qib_write_kreg_port(ppd, krp_highprio_limit, val);
  3859. goto bail;
  3860. case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
  3861. if (val > 3) {
  3862. ret = -EINVAL;
  3863. goto bail;
  3864. }
  3865. lsb = IBA7322_IBC_HRTBT_LSB;
  3866. maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
  3867. break;
  3868. case QIB_IB_CFG_PORT:
  3869. /* val is the port number of the switch we are connected to. */
  3870. if (ppd->dd->cspec->r1) {
  3871. cancel_delayed_work(&ppd->cpspec->ipg_work);
  3872. ppd->cpspec->ipg_tries = 0;
  3873. }
  3874. goto bail;
  3875. default:
  3876. ret = -EINVAL;
  3877. goto bail;
  3878. }
  3879. ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
  3880. ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
  3881. qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
  3882. qib_write_kreg(dd, kr_scratch, 0);
  3883. bail:
  3884. return ret;
  3885. }
  3886. static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
  3887. {
  3888. int ret = 0;
  3889. u64 val, ctrlb;
  3890. /* only IBC loopback, may add serdes and xgxs loopbacks later */
  3891. if (!strncmp(what, "ibc", 3)) {
  3892. ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
  3893. Loopback);
  3894. val = 0; /* disable heart beat, so link will come up */
  3895. qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
  3896. ppd->dd->unit, ppd->port);
  3897. } else if (!strncmp(what, "off", 3)) {
  3898. ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
  3899. Loopback);
  3900. /* enable heart beat again */
  3901. val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
  3902. qib_devinfo(ppd->dd->pcidev,
  3903. "Disabling IB%u:%u IBC loopback (normal)\n",
  3904. ppd->dd->unit, ppd->port);
  3905. } else
  3906. ret = -EINVAL;
  3907. if (!ret) {
  3908. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  3909. ppd->cpspec->ibcctrl_a);
  3910. ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
  3911. << IBA7322_IBC_HRTBT_LSB);
  3912. ppd->cpspec->ibcctrl_b = ctrlb | val;
  3913. qib_write_kreg_port(ppd, krp_ibcctrl_b,
  3914. ppd->cpspec->ibcctrl_b);
  3915. qib_write_kreg(ppd->dd, kr_scratch, 0);
  3916. }
  3917. return ret;
  3918. }
  3919. static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
  3920. struct ib_vl_weight_elem *vl)
  3921. {
  3922. unsigned i;
  3923. for (i = 0; i < 16; i++, regno++, vl++) {
  3924. u32 val = qib_read_kreg_port(ppd, regno);
  3925. vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
  3926. SYM_RMASK(LowPriority0_0, VirtualLane);
  3927. vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
  3928. SYM_RMASK(LowPriority0_0, Weight);
  3929. }
  3930. }
  3931. static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
  3932. struct ib_vl_weight_elem *vl)
  3933. {
  3934. unsigned i;
  3935. for (i = 0; i < 16; i++, regno++, vl++) {
  3936. u64 val;
  3937. val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
  3938. SYM_LSB(LowPriority0_0, VirtualLane)) |
  3939. ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
  3940. SYM_LSB(LowPriority0_0, Weight));
  3941. qib_write_kreg_port(ppd, regno, val);
  3942. }
  3943. if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
  3944. struct qib_devdata *dd = ppd->dd;
  3945. unsigned long flags;
  3946. spin_lock_irqsave(&dd->sendctrl_lock, flags);
  3947. ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
  3948. qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
  3949. qib_write_kreg(dd, kr_scratch, 0);
  3950. spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
  3951. }
  3952. }
  3953. static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
  3954. {
  3955. switch (which) {
  3956. case QIB_IB_TBL_VL_HIGH_ARB:
  3957. get_vl_weights(ppd, krp_highprio_0, t);
  3958. break;
  3959. case QIB_IB_TBL_VL_LOW_ARB:
  3960. get_vl_weights(ppd, krp_lowprio_0, t);
  3961. break;
  3962. default:
  3963. return -EINVAL;
  3964. }
  3965. return 0;
  3966. }
  3967. static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
  3968. {
  3969. switch (which) {
  3970. case QIB_IB_TBL_VL_HIGH_ARB:
  3971. set_vl_weights(ppd, krp_highprio_0, t);
  3972. break;
  3973. case QIB_IB_TBL_VL_LOW_ARB:
  3974. set_vl_weights(ppd, krp_lowprio_0, t);
  3975. break;
  3976. default:
  3977. return -EINVAL;
  3978. }
  3979. return 0;
  3980. }
  3981. static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
  3982. u32 updegr, u32 egrhd, u32 npkts)
  3983. {
  3984. /*
  3985. * Need to write timeout register before updating rcvhdrhead to ensure
  3986. * that the timer is enabled on reception of a packet.
  3987. */
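	/*
	 * Editor's reading of the test below: hd carries a packet count
	 * above IBA7322_HDRHEAD_PKTINT_SHIFT in addition to the head
	 * index, so a nonzero value up there means the receive timeout
	 * should be re-armed before the head update.
	 */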
  3988. if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
  3989. adjust_rcv_timeout(rcd, npkts);
  3990. if (updegr)
  3991. qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
  3992. mmiowb();
  3993. qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
  3994. qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
  3995. mmiowb();
  3996. }
  3997. static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
  3998. {
  3999. u32 head, tail;
  4000. head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
  4001. if (rcd->rcvhdrtail_kvaddr)
  4002. tail = qib_get_rcvhdrtail(rcd);
  4003. else
  4004. tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
  4005. return head == tail;
  4006. }
  4007. #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
  4008. QIB_RCVCTRL_CTXT_DIS | \
  4009. QIB_RCVCTRL_TIDFLOW_ENB | \
  4010. QIB_RCVCTRL_TIDFLOW_DIS | \
  4011. QIB_RCVCTRL_TAILUPD_ENB | \
  4012. QIB_RCVCTRL_TAILUPD_DIS | \
  4013. QIB_RCVCTRL_INTRAVAIL_ENB | \
  4014. QIB_RCVCTRL_INTRAVAIL_DIS | \
  4015. QIB_RCVCTRL_BP_ENB | \
  4016. QIB_RCVCTRL_BP_DIS)
  4017. #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
  4018. QIB_RCVCTRL_CTXT_DIS | \
  4019. QIB_RCVCTRL_PKEY_DIS | \
  4020. QIB_RCVCTRL_PKEY_ENB)
/*
 * Modify the RCVCTRL register in a chip-specific way. This
 * is a function because bit positions and (future) register
 * location are chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
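/*
 * Editor's usage sketch: because <op> is a bit-mask, callers can fold
 * several modifications into one call, e.g.
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			 QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
 *
 * while op == 0 acts as a flush that rewrites both the common and the
 * per-port register, as the code below spells out.
 */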
  4028. static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
  4029. int ctxt)
  4030. {
  4031. struct qib_devdata *dd = ppd->dd;
  4032. struct qib_ctxtdata *rcd;
  4033. u64 mask, val;
  4034. unsigned long flags;
  4035. spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
  4036. if (op & QIB_RCVCTRL_TIDFLOW_ENB)
  4037. dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
  4038. if (op & QIB_RCVCTRL_TIDFLOW_DIS)
  4039. dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
  4040. if (op & QIB_RCVCTRL_TAILUPD_ENB)
  4041. dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
  4042. if (op & QIB_RCVCTRL_TAILUPD_DIS)
  4043. dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
  4044. if (op & QIB_RCVCTRL_PKEY_ENB)
  4045. ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
  4046. if (op & QIB_RCVCTRL_PKEY_DIS)
  4047. ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
  4048. if (ctxt < 0) {
  4049. mask = (1ULL << dd->ctxtcnt) - 1;
  4050. rcd = NULL;
  4051. } else {
  4052. mask = (1ULL << ctxt);
  4053. rcd = dd->rcd[ctxt];
  4054. }
  4055. if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
  4056. ppd->p_rcvctrl |=
  4057. (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
  4058. if (!(dd->flags & QIB_NODMA_RTAIL)) {
  4059. op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
  4060. dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
  4061. }
  4062. /* Write these registers before the context is enabled. */
  4063. qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
  4064. rcd->rcvhdrqtailaddr_phys);
  4065. qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
  4066. rcd->rcvhdrq_phys);
  4067. rcd->seq_cnt = 1;
  4068. }
  4069. if (op & QIB_RCVCTRL_CTXT_DIS)
  4070. ppd->p_rcvctrl &=
  4071. ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
  4072. if (op & QIB_RCVCTRL_BP_ENB)
  4073. dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
  4074. if (op & QIB_RCVCTRL_BP_DIS)
  4075. dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
  4076. if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
  4077. dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
  4078. if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
  4079. dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
  4080. /*
  4081. * Decide which registers to write depending on the ops enabled.
  4082. * Special case is "flush" (no bits set at all)
  4083. * which needs to write both.
  4084. */
  4085. if (op == 0 || (op & RCVCTRL_COMMON_MODS))
  4086. qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
  4087. if (op == 0 || (op & RCVCTRL_PORT_MODS))
  4088. qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
  4089. if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
  4090. /*
  4091. * Init the context registers also; if we were
  4092. * disabled, tail and head should both be zero
  4093. * already from the enable, but since we don't
  4094. * know, we have to do it explicitly.
  4095. */
  4096. val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
  4097. qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
  4098. /* be sure enabling write seen; hd/tl should be 0 */
  4099. (void) qib_read_kreg32(dd, kr_scratch);
  4100. val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
  4101. dd->rcd[ctxt]->head = val;
  4102. /* If kctxt, interrupt on next receive. */
  4103. if (ctxt < dd->first_user_ctxt)
  4104. val |= dd->rhdrhead_intr_off;
  4105. qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
  4106. } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
  4107. dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
  4108. /* arm rcv interrupt */
  4109. val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
  4110. qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
  4111. }
  4112. if (op & QIB_RCVCTRL_CTXT_DIS) {
  4113. unsigned f;
  4114. /* Now that the context is disabled, clear these registers. */
  4115. if (ctxt >= 0) {
  4116. qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
  4117. qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
  4118. for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
  4119. qib_write_ureg(dd, ur_rcvflowtable + f,
  4120. TIDFLOW_ERRBITS, ctxt);
  4121. } else {
  4122. unsigned i;
  4123. for (i = 0; i < dd->cfgctxts; i++) {
  4124. qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
  4125. i, 0);
  4126. qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
  4127. for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
  4128. qib_write_ureg(dd, ur_rcvflowtable + f,
  4129. TIDFLOW_ERRBITS, i);
  4130. }
  4131. }
  4132. }
  4133. spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
  4134. }
/*
 * Modify the SENDCTRL register in a chip-specific way. This
 * is a function because there are multiple such registers with
 * slightly different layouts.
  4139. * The chip doesn't allow back-to-back sendctrl writes, so write
  4140. * the scratch register after writing sendctrl.
  4141. *
  4142. * Which register is written depends on the operation.
  4143. * Most operate on the common register, while
  4144. * SEND_ENB and SEND_DIS operate on the per-port ones.
  4145. * SEND_ENB is included in common because it can change SPCL_TRIG
  4146. */
  4147. #define SENDCTRL_COMMON_MODS (\
  4148. QIB_SENDCTRL_CLEAR | \
  4149. QIB_SENDCTRL_AVAIL_DIS | \
  4150. QIB_SENDCTRL_AVAIL_ENB | \
  4151. QIB_SENDCTRL_AVAIL_BLIP | \
  4152. QIB_SENDCTRL_DISARM | \
  4153. QIB_SENDCTRL_DISARM_ALL | \
  4154. QIB_SENDCTRL_SEND_ENB)
  4155. #define SENDCTRL_PORT_MODS (\
  4156. QIB_SENDCTRL_CLEAR | \
  4157. QIB_SENDCTRL_SEND_ENB | \
  4158. QIB_SENDCTRL_SEND_DIS | \
  4159. QIB_SENDCTRL_FLUSH)
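/*
 * Editor's note: every sendctrl update in the function below follows
 * the pattern the comment above requires, e.g.
 *
 *	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
 *	qib_write_kreg(dd, kr_scratch, 0);
 *
 * with the scratch write separating back-to-back sendctrl writes.
 */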
  4160. static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
  4161. {
  4162. struct qib_devdata *dd = ppd->dd;
  4163. u64 tmp_dd_sendctrl;
  4164. unsigned long flags;
  4165. spin_lock_irqsave(&dd->sendctrl_lock, flags);
  4166. /* First the dd ones that are "sticky", saved in shadow */
  4167. if (op & QIB_SENDCTRL_CLEAR)
  4168. dd->sendctrl = 0;
  4169. if (op & QIB_SENDCTRL_AVAIL_DIS)
  4170. dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
  4171. else if (op & QIB_SENDCTRL_AVAIL_ENB) {
  4172. dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
  4173. if (dd->flags & QIB_USE_SPCL_TRIG)
  4174. dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
  4175. }
  4176. /* Then the ppd ones that are "sticky", saved in shadow */
  4177. if (op & QIB_SENDCTRL_SEND_DIS)
  4178. ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
  4179. else if (op & QIB_SENDCTRL_SEND_ENB)
  4180. ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
  4181. if (op & QIB_SENDCTRL_DISARM_ALL) {
  4182. u32 i, last;
  4183. tmp_dd_sendctrl = dd->sendctrl;
  4184. last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
  4185. /*
  4186. * Disarm any buffers that are not yet launched,
  4187. * disabling updates until done.
  4188. */
  4189. tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
  4190. for (i = 0; i < last; i++) {
  4191. qib_write_kreg(dd, kr_sendctrl,
  4192. tmp_dd_sendctrl |
  4193. SYM_MASK(SendCtrl, Disarm) | i);
  4194. qib_write_kreg(dd, kr_scratch, 0);
  4195. }
  4196. }
  4197. if (op & QIB_SENDCTRL_FLUSH) {
  4198. u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
  4199. /*
  4200. * Now drain all the fifos. The Abort bit should never be
  4201. * needed, so for now, at least, we don't use it.
  4202. */
  4203. tmp_ppd_sendctrl |=
  4204. SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
  4205. SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
  4206. SYM_MASK(SendCtrl_0, TxeBypassIbc);
  4207. qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
  4208. qib_write_kreg(dd, kr_scratch, 0);
  4209. }
  4210. tmp_dd_sendctrl = dd->sendctrl;
  4211. if (op & QIB_SENDCTRL_DISARM)
  4212. tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
  4213. ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
  4214. SYM_LSB(SendCtrl, DisarmSendBuf));
  4215. if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
  4216. (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
  4217. tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
  4218. if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
  4219. qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
  4220. qib_write_kreg(dd, kr_scratch, 0);
  4221. }
  4222. if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
  4223. qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
  4224. qib_write_kreg(dd, kr_scratch, 0);
  4225. }
  4226. if (op & QIB_SENDCTRL_AVAIL_BLIP) {
  4227. qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
  4228. qib_write_kreg(dd, kr_scratch, 0);
  4229. }
  4230. spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
  4231. if (op & QIB_SENDCTRL_FLUSH) {
  4232. u32 v;
  4233. /*
  4234. * ensure writes have hit chip, then do a few
  4235. * more reads, to allow DMA of pioavail registers
  4236. * to occur, so in-memory copy is in sync with
  4237. * the chip. Not always safe to sleep.
  4238. */
  4239. v = qib_read_kreg32(dd, kr_scratch);
  4240. qib_write_kreg(dd, kr_scratch, v);
  4241. v = qib_read_kreg32(dd, kr_scratch);
  4242. qib_write_kreg(dd, kr_scratch, v);
  4243. qib_read_kreg32(dd, kr_scratch);
  4244. }
  4245. }
  4246. #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
  4247. #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
  4248. #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
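/*
 * Editor's example of the flag encoding above: an xlator entry such as
 * (crp_pktsend | _PORT_64BIT_FLAG) decodes as
 *
 *	creg   = entry & _PORT_CNTR_IDXMASK;	counter index
 *	wide64 = entry & _PORT_64BIT_FLAG;	use a 64-bit read
 *
 * exactly as qib_portcntr_7322() below does.
 */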
/**
 * qib_portcntr_7322 - read a per-port chip counter
 * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
 */
  4254. static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
  4255. {
  4256. struct qib_devdata *dd = ppd->dd;
  4257. u64 ret = 0ULL;
  4258. u16 creg;
  4259. /* 0xffff for unimplemented or synthesized counters */
  4260. static const u32 xlator[] = {
  4261. [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
  4262. [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
  4263. [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
  4264. [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
  4265. [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
  4266. [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
  4267. [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
  4268. [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
  4269. [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
  4270. [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
  4271. [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
  4272. [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
  4273. [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
  4274. [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
  4275. [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
  4276. [QIBPORTCNTR_ERRICRC] = crp_erricrc,
  4277. [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
  4278. [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
  4279. [QIBPORTCNTR_BADFORMAT] = crp_badformat,
  4280. [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
  4281. [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
  4282. [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
  4283. [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
  4284. [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
  4285. [QIBPORTCNTR_ERRLINK] = crp_errlink,
  4286. [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
  4287. [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
  4288. [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
  4289. [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
  4290. [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
  4291. /*
  4292. * the next 3 aren't really counters, but were implemented
  4293. * as counters in older chips, so still get accessed as
  4294. * though they were counters from this code.
  4295. */
  4296. [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
  4297. [QIBPORTCNTR_PSSTART] = krp_psstart,
  4298. [QIBPORTCNTR_PSSTAT] = krp_psstat,
  4299. /* pseudo-counter, summed for all ports */
  4300. [QIBPORTCNTR_KHDROVFL] = 0xffff,
  4301. };
  4302. if (reg >= ARRAY_SIZE(xlator)) {
  4303. qib_devinfo(ppd->dd->pcidev,
  4304. "Unimplemented portcounter %u\n", reg);
  4305. goto done;
  4306. }
  4307. creg = xlator[reg] & _PORT_CNTR_IDXMASK;
  4308. /* handle non-counters and special cases first */
  4309. if (reg == QIBPORTCNTR_KHDROVFL) {
  4310. int i;
  4311. /* sum over all kernel contexts (skip if mini_init) */
  4312. for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
  4313. struct qib_ctxtdata *rcd = dd->rcd[i];
  4314. if (!rcd || rcd->ppd != ppd)
  4315. continue;
  4316. ret += read_7322_creg32(dd, cr_base_egrovfl + i);
  4317. }
  4318. goto done;
  4319. } else if (reg == QIBPORTCNTR_RXDROPPKT) {
  4320. /*
  4321. * Used as part of the synthesis of port_rcv_errors
  4322. * in the verbs code for IBTA counters. Not needed for 7322,
  4323. * because all the errors are already counted by other cntrs.
  4324. */
  4325. goto done;
  4326. } else if (reg == QIBPORTCNTR_PSINTERVAL ||
  4327. reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
  4328. /* were counters in older chips, now per-port kernel regs */
  4329. ret = qib_read_kreg_port(ppd, creg);
  4330. goto done;
  4331. }
  4332. /*
  4333. * Only fast increment counters are 64 bits; use 32 bit reads to
  4334. * avoid two independent reads when on Opteron.
  4335. */
  4336. if (xlator[reg] & _PORT_64BIT_FLAG)
  4337. ret = read_7322_creg_port(ppd, creg);
  4338. else
  4339. ret = read_7322_creg32_port(ppd, creg);
  4340. if (creg == crp_ibsymbolerr) {
  4341. if (ppd->cpspec->ibdeltainprog)
  4342. ret -= ret - ppd->cpspec->ibsymsnap;
  4343. ret -= ppd->cpspec->ibsymdelta;
  4344. } else if (creg == crp_iblinkerrrecov) {
  4345. if (ppd->cpspec->ibdeltainprog)
  4346. ret -= ret - ppd->cpspec->iblnkerrsnap;
  4347. ret -= ppd->cpspec->iblnkerrdelta;
  4348. } else if (creg == crp_errlink)
  4349. ret -= ppd->cpspec->ibmalfdelta;
  4350. else if (creg == crp_iblinkdown)
  4351. ret += ppd->cpspec->iblnkdowndelta;
  4352. done:
  4353. return ret;
  4354. }
/*
 * Device counter names (not port-specific), one line per stat,
 * single string. Used by utilities like ipathstats to print the stats
 * in a way which works for different versions of drivers, without changing
 * the utility. Names need to be 12 chars or less (w/o newline), for proper
 * display by utility.
 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
 * The EgrOvfl list needs to be last so we truncate them at the configured
 * context count for the device.
 * cntr7322indices contains the corresponding register indices.
 */
  4368. static const char cntr7322names[] =
  4369. "Interrupts\n"
  4370. "HostBusStall\n"
  4371. "E RxTIDFull\n"
  4372. "RxTIDInvalid\n"
  4373. "RxTIDFloDrop\n" /* 7322 only */
  4374. "Ctxt0EgrOvfl\n"
  4375. "Ctxt1EgrOvfl\n"
  4376. "Ctxt2EgrOvfl\n"
  4377. "Ctxt3EgrOvfl\n"
  4378. "Ctxt4EgrOvfl\n"
  4379. "Ctxt5EgrOvfl\n"
  4380. "Ctxt6EgrOvfl\n"
  4381. "Ctxt7EgrOvfl\n"
  4382. "Ctxt8EgrOvfl\n"
  4383. "Ctxt9EgrOvfl\n"
  4384. "Ctx10EgrOvfl\n"
  4385. "Ctx11EgrOvfl\n"
  4386. "Ctx12EgrOvfl\n"
  4387. "Ctx13EgrOvfl\n"
  4388. "Ctx14EgrOvfl\n"
  4389. "Ctx15EgrOvfl\n"
  4390. "Ctx16EgrOvfl\n"
  4391. "Ctx17EgrOvfl\n"
  4392. ;
  4393. static const u32 cntr7322indices[] = {
  4394. cr_lbint | _PORT_64BIT_FLAG,
  4395. cr_lbstall | _PORT_64BIT_FLAG,
  4396. cr_tidfull,
  4397. cr_tidinvalid,
  4398. cr_rxtidflowdrop,
  4399. cr_base_egrovfl + 0,
  4400. cr_base_egrovfl + 1,
  4401. cr_base_egrovfl + 2,
  4402. cr_base_egrovfl + 3,
  4403. cr_base_egrovfl + 4,
  4404. cr_base_egrovfl + 5,
  4405. cr_base_egrovfl + 6,
  4406. cr_base_egrovfl + 7,
  4407. cr_base_egrovfl + 8,
  4408. cr_base_egrovfl + 9,
  4409. cr_base_egrovfl + 10,
  4410. cr_base_egrovfl + 11,
  4411. cr_base_egrovfl + 12,
  4412. cr_base_egrovfl + 13,
  4413. cr_base_egrovfl + 14,
  4414. cr_base_egrovfl + 15,
  4415. cr_base_egrovfl + 16,
  4416. cr_base_egrovfl + 17,
  4417. };
  4418. /*
  4419. * same as cntr7322names and cntr7322indices, but for port-specific counters.
  4420. * portcntr7322indices is somewhat complicated by some registers needing
  4421. * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
  4422. */
  4423. static const char portcntr7322names[] =
  4424. "TxPkt\n"
  4425. "TxFlowPkt\n"
  4426. "TxWords\n"
  4427. "RxPkt\n"
  4428. "RxFlowPkt\n"
  4429. "RxWords\n"
  4430. "TxFlowStall\n"
  4431. "TxDmaDesc\n" /* 7220 and 7322-only */
  4432. "E RxDlidFltr\n" /* 7220 and 7322-only */
  4433. "IBStatusChng\n"
  4434. "IBLinkDown\n"
  4435. "IBLnkRecov\n"
  4436. "IBRxLinkErr\n"
  4437. "IBSymbolErr\n"
  4438. "RxLLIErr\n"
  4439. "RxBadFormat\n"
  4440. "RxBadLen\n"
  4441. "RxBufOvrfl\n"
  4442. "RxEBP\n"
  4443. "RxFlowCtlErr\n"
  4444. "RxICRCerr\n"
  4445. "RxLPCRCerr\n"
  4446. "RxVCRCerr\n"
  4447. "RxInvalLen\n"
  4448. "RxInvalPKey\n"
  4449. "RxPktDropped\n"
  4450. "TxBadLength\n"
  4451. "TxDropped\n"
  4452. "TxInvalLen\n"
  4453. "TxUnderrun\n"
  4454. "TxUnsupVL\n"
  4455. "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
  4456. "RxVL15Drop\n"
  4457. "RxVlErr\n"
  4458. "XcessBufOvfl\n"
  4459. "RxQPBadCtxt\n" /* 7322-only from here down */
  4460. "TXBadHeader\n"
  4461. ;
  4462. static const u32 portcntr7322indices[] = {
  4463. QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
  4464. crp_pktsendflow,
  4465. QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
  4466. QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
  4467. crp_pktrcvflowctrl,
  4468. QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
  4469. QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
  4470. crp_txsdmadesc | _PORT_64BIT_FLAG,
  4471. crp_rxdlidfltr,
  4472. crp_ibstatuschange,
  4473. QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
  4474. QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
  4475. QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
  4476. QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
  4477. QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
  4478. QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
  4479. QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
  4480. QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
  4481. QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
  4482. crp_rcvflowctrlviol,
  4483. QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
  4484. QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
  4485. QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
  4486. QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
  4487. QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
  4488. QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
  4489. crp_txminmaxlenerr,
  4490. crp_txdroppedpkt,
  4491. crp_txlenerr,
  4492. crp_txunderrun,
  4493. crp_txunsupvl,
  4494. QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
  4495. QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
  4496. QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
  4497. QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
  4498. crp_rxqpinvalidctxt,
  4499. crp_txhdrerr,
  4500. };
  4501. /* do all the setup to make the counter reads efficient later */
  4502. static void init_7322_cntrnames(struct qib_devdata *dd)
  4503. {
  4504. int i, j = 0;
  4505. char *s;
  4506. for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
  4507. i++) {
  4508. /* we always have at least one counter before the egrovfl */
  4509. if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
  4510. j = 1;
  4511. s = strchr(s + 1, '\n');
  4512. if (s && j)
  4513. j++;
  4514. }
  4515. dd->cspec->ncntrs = i;
  4516. if (!s)
  4517. /* full list; size is without terminating null */
  4518. dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
  4519. else
  4520. dd->cspec->cntrnamelen = 1 + s - cntr7322names;
  4521. dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
  4522. * sizeof(u64), GFP_KERNEL);
  4523. if (!dd->cspec->cntrs)
  4524. qib_dev_err(dd, "Failed allocation for counters\n");
  4525. for (i = 0, s = (char *)portcntr7322names; s; i++)
  4526. s = strchr(s + 1, '\n');
  4527. dd->cspec->nportcntrs = i - 1;
  4528. dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
  4529. for (i = 0; i < dd->num_pports; ++i) {
  4530. dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
  4531. * sizeof(u64), GFP_KERNEL);
  4532. if (!dd->pport[i].cpspec->portcntrs)
  4533. qib_dev_err(dd,
  4534. "Failed allocation for portcounters\n");
  4535. }
  4536. }
  4537. static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
  4538. u64 **cntrp)
  4539. {
  4540. u32 ret;
  4541. if (namep) {
  4542. ret = dd->cspec->cntrnamelen;
  4543. if (pos >= ret)
  4544. ret = 0; /* final read after getting everything */
  4545. else
  4546. *namep = (char *) cntr7322names;
  4547. } else {
  4548. u64 *cntr = dd->cspec->cntrs;
  4549. int i;
  4550. ret = dd->cspec->ncntrs * sizeof(u64);
  4551. if (!cntr || pos >= ret) {
  4552. /* everything read, or couldn't get memory */
  4553. ret = 0;
  4554. goto done;
  4555. }
  4556. *cntrp = cntr;
  4557. for (i = 0; i < dd->cspec->ncntrs; i++)
  4558. if (cntr7322indices[i] & _PORT_64BIT_FLAG)
  4559. *cntr++ = read_7322_creg(dd,
  4560. cntr7322indices[i] &
  4561. _PORT_CNTR_IDXMASK);
  4562. else
  4563. *cntr++ = read_7322_creg32(dd,
  4564. cntr7322indices[i]);
  4565. }
  4566. done:
  4567. return ret;
  4568. }
  4569. static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
  4570. char **namep, u64 **cntrp)
  4571. {
  4572. u32 ret;
  4573. if (namep) {
  4574. ret = dd->cspec->portcntrnamelen;
  4575. if (pos >= ret)
  4576. ret = 0; /* final read after getting everything */
  4577. else
  4578. *namep = (char *)portcntr7322names;
  4579. } else {
  4580. struct qib_pportdata *ppd = &dd->pport[port];
  4581. u64 *cntr = ppd->cpspec->portcntrs;
  4582. int i;
  4583. ret = dd->cspec->nportcntrs * sizeof(u64);
  4584. if (!cntr || pos >= ret) {
  4585. /* everything read, or couldn't get memory */
  4586. ret = 0;
  4587. goto done;
  4588. }
  4589. *cntrp = cntr;
  4590. for (i = 0; i < dd->cspec->nportcntrs; i++) {
  4591. if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
  4592. *cntr++ = qib_portcntr_7322(ppd,
  4593. portcntr7322indices[i] &
  4594. _PORT_CNTR_IDXMASK);
  4595. else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
  4596. *cntr++ = read_7322_creg_port(ppd,
  4597. portcntr7322indices[i] &
  4598. _PORT_CNTR_IDXMASK);
  4599. else
  4600. *cntr++ = read_7322_creg32_port(ppd,
  4601. portcntr7322indices[i]);
  4602. }
  4603. }
  4604. done:
  4605. return ret;
  4606. }
/**
 * qib_get_7322_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * VESTIGIAL: IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have, yet, for 7322-based boards.
 *
 * called from add_timer
 */
  4618. static void qib_get_7322_faststats(unsigned long opaque)
  4619. {
  4620. struct qib_devdata *dd = (struct qib_devdata *) opaque;
  4621. struct qib_pportdata *ppd;
  4622. unsigned long flags;
  4623. u64 traffic_wds;
  4624. int pidx;
  4625. for (pidx = 0; pidx < dd->num_pports; ++pidx) {
  4626. ppd = dd->pport + pidx;
		/*
		 * If the port isn't enabled, or isn't operational, or if
		 * diags are running (which can cause memory diags to fail),
		 * skip this port this time.
		 */
  4632. if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
  4633. || dd->diag_client)
  4634. continue;
  4635. /*
  4636. * Maintain an activity timer, based on traffic
  4637. * exceeding a threshold, so we need to check the word-counts
  4638. * even if they are 64-bit.
  4639. */
  4640. traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
  4641. qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
  4642. spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
  4643. traffic_wds -= ppd->dd->traffic_wds;
  4644. ppd->dd->traffic_wds += traffic_wds;
  4645. if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
  4646. atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
  4647. spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
  4648. if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
  4649. QIB_IB_QDR) &&
  4650. (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
  4651. QIBL_LINKACTIVE)) &&
  4652. ppd->cpspec->qdr_dfe_time &&
  4653. time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
  4654. ppd->cpspec->qdr_dfe_on = 0;
  4655. qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
  4656. ppd->dd->cspec->r1 ?
  4657. QDR_STATIC_ADAPT_INIT_R1 :
  4658. QDR_STATIC_ADAPT_INIT);
  4659. force_h1(ppd);
  4660. }
  4661. }
  4662. mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
  4663. }
/*
 * If we were using MSIx, try to fall back to INTx.
 */
  4667. static int qib_7322_intr_fallback(struct qib_devdata *dd)
  4668. {
  4669. if (!dd->cspec->num_msix_entries)
  4670. return 0; /* already using INTx */
  4671. qib_devinfo(dd->pcidev,
  4672. "MSIx interrupt not detected, trying INTx interrupts\n");
  4673. qib_7322_nomsix(dd);
  4674. qib_enable_intx(dd->pcidev);
  4675. qib_setup_7322_interrupt(dd, 0);
  4676. return 1;
  4677. }
/*
 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining. To do this right, we reset IBC
 * as well, then return to the previous state (which may still be in reset).
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
 * check all callers.
 */
  4687. static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
  4688. {
  4689. u64 val;
  4690. struct qib_devdata *dd = ppd->dd;
  4691. const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
  4692. SYM_MASK(IBPCSConfig_0, xcv_treset) |
  4693. SYM_MASK(IBPCSConfig_0, tx_rx_reset);
  4694. val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
  4695. qib_write_kreg(dd, kr_hwerrmask,
  4696. dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
  4697. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  4698. ppd->cpspec->ibcctrl_a &
  4699. ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
  4700. qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
  4701. qib_read_kreg32(dd, kr_scratch);
  4702. qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
  4703. qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
  4704. qib_write_kreg(dd, kr_scratch, 0ULL);
  4705. qib_write_kreg(dd, kr_hwerrclear,
  4706. SYM_MASK(HwErrClear, statusValidNoEopClear));
  4707. qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
  4708. }
/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware. It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
  4717. static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
  4718. u32 dcnt, u32 *data)
  4719. {
  4720. int i;
  4721. u64 pbc;
  4722. u32 __iomem *piobuf;
  4723. u32 pnum, control, len;
  4724. struct qib_devdata *dd = ppd->dd;
  4725. i = 0;
  4726. len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
  4727. control = qib_7322_setpbc_control(ppd, len, 0, 15);
  4728. pbc = ((u64) control << 32) | len;
  4729. while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
  4730. if (i++ > 15)
  4731. return;
  4732. udelay(2);
  4733. }
  4734. /* disable header check on this packet, since it can't be valid */
  4735. dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
  4736. writeq(pbc, piobuf);
  4737. qib_flush_wc();
  4738. qib_pio_copy(piobuf + 2, hdr, 7);
  4739. qib_pio_copy(piobuf + 9, data, dcnt);
  4740. if (dd->flags & QIB_USE_SPCL_TRIG) {
  4741. u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
  4742. qib_flush_wc();
  4743. __raw_writel(0xaebecede, piobuf + spcl_off);
  4744. }
  4745. qib_flush_wc();
  4746. qib_sendbuf_done(dd, pnum);
  4747. /* and re-enable hdr check */
  4748. dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
  4749. }
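
/*
 * Worked example of the PBC math above (a sketch of the existing
 * arithmetic, not additional behavior): for the autoneg MAD payloads
 * below, dcnt is 0x40 (64 payload dwords), so len = 7 + 64 + 1 = 72
 * dwords including the ICRC word, and the PBC written to the buffer
 * head is ((u64)control << 32) | 72.
 */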
/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
		};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
		};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}
/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change. The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down)
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When the link has gone down and autoneg is enabled, or autoneg has
 * failed and we give up until next time, we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
 */
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	u64 newctrlb;

	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK);

	if (speed & (speed - 1)) /* multiple speeds */
		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK;
	else
		newctrlb |= speed == QIB_IB_QDR ?
			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
			((speed == QIB_IB_DDR ?
			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));

	if (newctrlb == ppd->cpspec->ibcctrl_b)
		return;

	ppd->cpspec->ibcctrl_b = newctrlb;
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}
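
/*
 * Worked example of the multiple-speeds test above (a sketch; assumes
 * the usual one-hot speed encoding QIB_IB_SDR=1, QIB_IB_DDR=2,
 * QIB_IB_QDR=4): for speed = QIB_IB_SDR | QIB_IB_DDR = 0x3,
 * speed & (speed - 1) = 0x3 & 0x2 = 0x2, non-zero, so more than one
 * bit is set and the "multiple speeds" branch is taken. Any single
 * speed is a power of two, so speed & (speed - 1) == 0.
 */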
/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT.
 */
static void try_7322_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	qib_autoneg_7322_send(ppd, 0);
	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
	qib_7322_mini_pcs_reset(ppd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}
/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = container_of(work, struct qib_chippport_specific,
			   autoneg_work.work)->ppd;
	dd = ppd->dd;

	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
		     == IB_7322_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
			   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			   msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			ppd->cpspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}
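
/*
 * Rough timeline of autoneg_7322_work() above (a summary of the code,
 * not an additional mechanism): up to ~2.5 msec busy-waiting for
 * POLLQUIET, then a ~90 msec wait expected to time out, a PCS reset,
 * a ~1700 msec wait also expected to time out, another PCS reset and
 * a fallback to SDR, and finally up to 250 msec for the link to
 * train, which should normally terminate early.
 */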
/*
 * This routine is used to request IPG set in the QLogic switch.
 * Only called if r1.
 */
static void try_7322_ipg(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	unsigned delay;
	int ret;

	agent = ibp->send_agent;
	if (!agent)
		goto retry;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		goto retry;

	if (!ibp->smi_ah) {
		struct ib_ah *ah;

		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
		if (IS_ERR(ah))
			ret = PTR_ERR(ah);
		else {
			send_buf->ah = ah;
			ibp->smi_ah = to_iah(ah);
			ret = 0;
		}
	} else {
		send_buf->ah = &ibp->smi_ah->ibah;
		ret = 0;
	}

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_SEND;
	smp->hop_cnt = 1;
	smp->attr_id = QIB_VENDOR_IPG;
	smp->attr_mod = 0;

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		ib_free_send_mad(send_buf);
retry:
	delay = 2 << ppd->cpspec->ipg_tries;
	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
			   msecs_to_jiffies(delay));
}
/*
 * Timeout handler for setting IPG.
 * Only called if r1.
 */
static void ipg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;

	ppd = container_of(work, struct qib_chippport_specific,
			   ipg_work.work)->ppd;
	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
	    && ++ppd->cpspec->ipg_tries <= 10)
		try_7322_ipg(ppd);
}

static u32 qib_7322_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);

	switch (state) {
	case IB_7322_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7322_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7322_L_STATE_ACTIVE:
		/* fall through */
	case IB_7322_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7322_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7322_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
	return qib_7322_physportstate[state];
}
static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	unsigned long flags;
	int mult;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	/* Update our picture of width and speed from chip */
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
		ppd->link_speed_active = QIB_IB_QDR;
		mult = 4;
	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
		ppd->link_speed_active = QIB_IB_DDR;
		mult = 2;
	} else {
		ppd->link_speed_active = QIB_IB_SDR;
		mult = 1;
	}
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
		ppd->link_width_active = IB_WIDTH_4X;
		mult *= 4;
	} else
		ppd->link_width_active = IB_WIDTH_1X;
	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
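
	/*
	 * Example of the mult computation above (a sketch of the
	 * existing logic, no new behavior): a QDR (mult = 4) link at
	 * 4X width yields mult = 16, i.e. 16x the 2.5 Gbps SDR/1X
	 * lane rate, which mult_to_ib_rate() maps to the 40 Gbps
	 * rate enum used to index ib_rate_to_delay[].
	 */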
	if (!ibup) {
		u64 clr;

		/* Link went down. */
		/* do IPG MAD again after linkdown, even if last time failed */
		ppd->cpspec->ipg_tries = 0;
		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
		if (clr)
			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			struct qib_qsfp_data *qd =
				&ppd->cpspec->qsfp_data;

			/* unlock the Tx settings, speed may change */
			qib_write_kreg_port(ppd, krp_tx_deemph_override,
				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				reset_tx_deemphasis_override));
			qib_cancel_sends(ppd);
			/* on link down, ensure sane pcs state */
			qib_7322_mini_pcs_reset(ppd);
			/* schedule the qsfp refresh which should turn the link
			   off */
			if (ppd->dd->flags & QIB_HAS_QSFP) {
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
		if (clr == ppd->cpspec->iblnkdownsnap)
			ppd->cpspec->iblnkdowndelta++;
	} else {
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & QIB_IB_DDR)
		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and auto-negotiation enabled */
			++ppd->cpspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymdelta +=
					read_7322_creg32_port(ppd,
						crp_ibsymbolerr) -
						ppd->cpspec->ibsymsnap;
				ppd->cpspec->iblnkerrdelta +=
					read_7322_creg32_port(ppd,
						crp_iblinkerrrecov) -
						ppd->cpspec->iblnkerrsnap;
			}
			try_7322_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			qib_autoneg_7322_send(ppd, 1);
			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
			qib_7322_mini_pcs_reset(ppd);
			udelay(2);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   (ppd->link_speed_active & QIB_IB_DDR)) {
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
					 QIBL_IB_AUTONEG_FAILED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->autoneg_tries = 0;
			/* re-enable SDR, for next link down */
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
			wake_up(&ppd->cpspec->autoneg_wait);
			symadj = 1;
		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
			/*
			 * Clear autoneg failure flag, and do setup
			 * so we'll try next time link goes down and
			 * back to INIT (possibly connected to a
			 * different device).
			 */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
			symadj = 1;
		}
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			symadj = 1;
			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
				try_7322_ipg(ppd);
			if (!ppd->cpspec->recovery_init)
				setup_7322_link_recovery(ppd, 0);
			ppd->cpspec->qdr_dfe_time = jiffies +
				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
		}
		ppd->cpspec->ibmalfusesnap = 0;
		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
			crp_errlink);
	}
	if (symadj) {
		ppd->cpspec->iblnkdownsnap =
			read_7322_creg32_port(ppd, crp_iblinkdown);
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
			crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
			crp_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7322_setextled(ppd, ibup);
	return ret;
}
/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
 */
static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function. We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}
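
/*
 * Usage sketch for gpio_7322_mod() (hypothetical pin number, just to
 * illustrate the out/dir/mask arguments): to drive GPIO pin 3 high,
 * pass out = dir = mask = (1 << 3); to read all pins without
 * changing anything, pass mask = 0, as qib_7322_eeprom_wen() below
 * does.
 */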
/* Enable writes to config EEPROM, if possible. Returns previous state */
static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
{
	int prev_wen;
	u32 mask;

	mask = 1 << QIB_EEPROM_WEN_NUM;
	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);

	return prev_wen & 1;
}
/*
 * Read fundamental info we need to use the chip. These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7322_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->palign = qib_read_kreg32(dd, kr_pagealign);

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport[0].ibmtu = (u32)mtu;
	dd->pport[1].ibmtu = (u32)mtu;

	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	dd->pio4kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase +
		 (dd->piobufbase >> 32));
	/*
	 * 4K buffers take 2 pages; we use roundup just to be
	 * paranoid; we calculate it once here, rather than on
	 * every buf allocation
	 */
	dd->align4k = ALIGN(dd->piosize4k, dd->palign);

	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;

	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
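
/*
 * Worked example of the pioavregs math above (hypothetical buffer
 * counts, just to show the arithmetic): each 64-bit pioavail
 * register holds 2 bits of state per send buffer, so one register
 * covers 64 / 2 = 32 buffers. With, say, piobufs = 144 + 32 + 14 =
 * 190, ALIGN(190, 32) / 32 = 192 / 32 = 6 registers.
 */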
/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as separate function
 */
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	cregbase = qib_read_kreg32(dd, kr_counterregbase);

	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
		(char __iomem *)dd->kregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);

	/* port registers are defined as relative to base of chip */
	dd->pport[0].cpspec->kpregbase =
		(u64 __iomem *)((char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->kpregbase =
		(u64 __iomem *)(dd->palign +
		(char __iomem *)dd->kregbase);
	dd->pport[0].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
}
/*
 * This is a fairly special-purpose observer, so we only support
 * the port-specific parts of SendCtrl
 */
#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))

static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op, u32 offs,
			 u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx;
	unsigned pidx;
	struct qib_pportdata *ppd = NULL;
	u64 local_data, all_bits;

	/*
	 * The fixed correspondence between Physical ports and pports is
	 * severed. We need to hunt for the ppd that corresponds
	 * to the offset we got. And we have to do that without admitting
	 * we know the stride, apparently.
	 */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		u64 __iomem *psptr;
		u32 psoffs;

		ppd = dd->pport + pidx;
		if (!ppd->cpspec->kpregbase)
			continue;

		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
		if (psoffs == offs)
			break;
	}

	/* If pport is not being managed by driver, just avoid shadows. */
	if (pidx >= dd->num_pports)
		ppd = NULL;

	/* In any case, "idx" is flat index in kreg space */
	idx = offs / sizeof(u64);

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (!ppd || (mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read. The judgement call is whether from
		 * reg or shadow. First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		if (ppd) {
			sval = ppd->p_sendctrl & ~mask;
			sval |= *data & SENDCTRL_SHADOWED & mask;
			ppd->p_sendctrl = sval;
		} else
			sval = *data & SENDCTRL_SHADOWED & mask;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_write_kreg(dd, idx, tval);
		qib_write_kreg(dd, kr_scratch, 0ULL);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	return only_32 ? 4 : 8;
}
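
/*
 * The shadow/transient split in sendctrl_hook(), in words (a summary
 * of the code above, not new behavior): for a diag write, bits under
 * "mask" that fall within SENDCTRL_SHADOWED are captured into
 * ppd->p_sendctrl so later driver writes preserve them, while bits
 * outside SENDCTRL_SHADOWED pass through to the chip for this one
 * write only (the "transient" part of tval).
 */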
static const struct diag_observer sendctrl_0_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
	KREG_IDX(SendCtrl_0) * sizeof(u64)
};

static const struct diag_observer sendctrl_1_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
	KREG_IDX(SendCtrl_1) * sizeof(u64)
};

static ushort sdma_fetch_prio = 8;
module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
/* Besides logging QSFP events, we set appropriate TxDDS values */
static void init_txdds_table(struct qib_pportdata *ppd, int override);

static void qsfp_7322_event(struct work_struct *work)
{
	struct qib_qsfp_data *qd;
	struct qib_pportdata *ppd;
	unsigned long pwrup;
	unsigned long flags;
	int ret;
	u32 le2;

	qd = container_of(work, struct qib_qsfp_data, work);
	ppd = qd->ppd;
	pwrup = qd->t_insert +
		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);

	/* Delay for 20 msecs to allow ModPrs resistor to setup */
	mdelay(QSFP_MODPRS_LAG_MSEC);

	if (!qib_qsfp_mod_present(ppd)) {
		ppd->cpspec->qsfp_data.modpresent = 0;
		/* Set the physical link to disabled */
		qib_set_ib_7322_lstate(ppd, 0,
				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else {
		/*
		 * Some QSFPs not only do not respond until the full power-up
		 * time, but may behave badly if we try. So hold off responding
		 * to insertion.
		 */
		while (1) {
			if (time_is_before_jiffies(pwrup))
				break;
			msleep(20);
		}

		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);

		/*
		 * Need to change LE2 back to defaults if we couldn't
		 * read the cable type (to handle cable swaps), so do this
		 * even on failure to read cable information. We don't
		 * get here for QME, so IS_QME check not needed here.
		 */
		if (!ret && !ppd->dd->cspec->r1) {
			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
				le2 = LE2_QME;
			else if (qd->cache.atten[1] >= qib_long_atten &&
				 QSFP_IS_CU(qd->cache.tech))
				le2 = LE2_5m;
			else
				le2 = LE2_DEFAULT;
		} else
			le2 = LE2_DEFAULT;
		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
		/*
		 * We always change parameters, since we can choose
		 * values for cables without eeproms, and the cable may have
		 * changed from a cable with full or partial eeprom content
		 * to one with partial or no content.
		 */
		init_txdds_table(ppd, 0);
		/* The physical link is being re-enabled only when the
		 * previous state was DISABLED and the VALID bit is not
		 * set. This should only happen when the cable has been
		 * physically pulled. */
		if (!ppd->cpspec->qsfp_data.modpresent &&
		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
			ppd->cpspec->qsfp_data.modpresent = 1;
			qib_set_ib_7322_lstate(ppd, 0,
				       QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_LINKV;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		}
	}
}
/*
 * There is little we can do but complain to the user if QSFP
 * initialization fails.
 */
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
{
	unsigned long flags;
	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
	struct qib_devdata *dd = ppd->dd;
	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;

	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
	qd->ppd = ppd;
	qib_qsfp_init(qd, qsfp_7322_event);
	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
	dd->cspec->gpio_mask |= mod_prs_bit;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}
/*
 * Called at device initialization time, and also if the txselect
 * module parameter is changed. This is used for cables that don't
 * have valid QSFP EEPROMs (not present, or attenuation is zero).
 * We initialize to the default, then if there is a specific
 * unit,port match, we use that (and set it immediately, for the
 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#", separators must
 * be a SPACE character. A newline terminates. The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value.
 * The last specific match is used (actually, all are used, but last
 * one is the one that winds up set); if none at all, fall back on default.
 */
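
/*
 * Example txselect string (hypothetical values, following the format
 * described above): "2 0,1=5 0,2=7,10\n" sets the default tx serdes
 * table index to 2 for all ports, overrides unit 0 port 1 to index 5,
 * and overrides unit 0 port 2 to index 7 with an H1 value of 10.
 */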
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
{
	char *nxt, *str;
	u32 pidx, unit, port, deflt, h1;
	unsigned long val;
	int any = 0, seth1;
	int txdds_size;

	str = txselect_list;

	/* default number is validated in setup_txselect() */
	deflt = simple_strtoul(str, &nxt, 0);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->pport[pidx].cpspec->no_eep = deflt;

	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
	if (IS_QME(dd) || IS_QMH(dd))
		txdds_size += TXDDS_MFG_SZ;

	while (*nxt && nxt[1]) {
		str = ++nxt;
		unit = simple_strtoul(str, &nxt, 0);
		if (nxt == str || !*nxt || *nxt != ',') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		port = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != '=') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		val = simple_strtoul(str, &nxt, 0);
		if (nxt == str) {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		if (val >= txdds_size)
			continue;
		seth1 = 0;
		h1 = 0; /* gcc thinks it might be used uninitialized */
		if (*nxt == ',' && nxt[1]) {
			str = ++nxt;
			h1 = (u32)simple_strtoul(str, &nxt, 0);
			if (nxt == str)
				while (*nxt && *nxt++ != ' ') /* skip */
					;
			else
				seth1 = 1;
		}
		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
		     ++pidx) {
			struct qib_pportdata *ppd = &dd->pport[pidx];

			if (ppd->port != port || !ppd->link_speed_supported)
				continue;
			ppd->cpspec->no_eep = val;
			if (seth1)
				ppd->cpspec->h1_val = h1;
			/* now change the IBC and serdes, overriding generic */
			init_txdds_table(ppd, 1);
			/* Re-enable the physical state machine on mezz boards
			 * now that the correct settings have been set.
			 * QSFP boards are handled by the QSFP event handler */
			if (IS_QMH(dd) || IS_QME(dd))
				qib_set_ib_7322_lstate(ppd, 0,
					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			any++;
		}
		if (*nxt == '\n')
			break; /* done */
	}
	if (change && !any) {
		/* no specific setting, use the default.
		 * Change the IBC and serdes, but since it's
		 * general, don't override specific settings.
		 */
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				init_txdds_table(&dd->pport[pidx], 0);
	}
}
/* handle the txselect parameter changing */
static int setup_txselect(const char *str, struct kernel_param *kp)
{
	struct qib_devdata *dd;
	unsigned long val;
	int ret;

	if (strlen(str) >= MAX_ATTEN_LEN) {
		pr_info("txselect_values string too long\n");
		return -ENOSPC;
	}
	ret = kstrtoul(str, 0, &val);
	if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
				TXDDS_MFG_SZ)) {
		pr_info("txselect_values must start with a number < %d\n",
			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
		return ret ? ret : -EINVAL;
	}

	strcpy(txselect_list, str);
	list_for_each_entry(dd, &qib_dev_list, list)
		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
			set_no_qsfp_atten(dd, 1);
	return 0;
}
/*
 * Write the final few registers that depend on some of the
 * init setup. Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7322_initreg(struct qib_devdata *dd)
{
	int ret = 0, n;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd,
			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
			(unsigned long) dd->pioavailregs_phys,
			(unsigned long long) val);
		ret = -EINVAL;
	}

	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);

	qib_register_observer(dd, &sendctrl_0_observer);
	qib_register_observer(dd, &sendctrl_1_observer);

	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);
	/*
	 * Set SendDmaFetchPriority and init Tx params, including
	 * QSFP handler on boards that have QSFP.
	 * First set our default attenuation entry for cables that
	 * don't have valid attenuation.
	 */
	set_no_qsfp_atten(dd, 0);
	for (n = 0; n < dd->num_pports; ++n) {
		struct qib_pportdata *ppd = dd->pport + n;

		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
				    sdma_fetch_prio & 0xf);
		/* Initialize qsfp if present on board. */
		if (dd->flags & QIB_HAS_QSFP)
			qib_init_7322_qsfp(ppd);
	}
	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);

	return ret;
}
/* per IB port errors. */
#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
		       MASK_ACROSS(8, 15))
#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
	MASK_ACROSS(0, 11))

/*
 * Write the initialization per-port registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_init_portregs(struct qib_pportdata *ppd)
{
	u64 val;
	int i;

	if (!ppd->link_speed_supported) {
		/* no buffer credits for this port */
		for (i = 1; i < 8; i++)
			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
		return;
	}

	/*
	 * Set the number of supported virtual lanes in IBC,
	 * for flow control packet handling on unsupported VLs
	 */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
	val |= (u64)(ppd->vls_supported - 1) <<
		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);

	/* enable tx header checking */
	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);

	qib_write_kreg_port(ppd, krp_ncmodectrl,
			    SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));

	/*
	 * Unconditionally clear the bufmask bits. If SDMA is
	 * enabled, we'll set them appropriately later.
	 */
	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
	if (ppd->dd->cspec->r1)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
}
/*
 * Write the initialization per-device registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c). Also write per-port
 * registers that are affected by overall device config, such as QP mapping.
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_initregs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i, pidx;
	u64 val;

	/* Set Multicast QPs received by port 2 to map to context one. */
	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		unsigned n, regno;
		unsigned long flags;

		if (dd->n_krcv_queues < 2 ||
			!dd->pport[pidx].link_speed_supported)
			continue;

		ppd = &dd->pport[pidx];

		/* be paranoid against later code motion, etc. */
		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

		/* Initialize QP to context mapping */
		regno = krp_rcvqpmaptable;
		val = 0;
		if (dd->num_pports > 1)
			n = dd->first_user_ctxt / dd->num_pports;
		else
			n = dd->first_user_ctxt - 1;
		for (i = 0; i < 32; ) {
			unsigned ctxt;

			if (dd->num_pports > 1)
				ctxt = (i % n) * dd->num_pports + pidx;
			else if (i % n)
				ctxt = (i % n) + 1;
			else
				ctxt = ppd->hw_pidx;
			val |= ctxt << (5 * (i % 6));
			i++;
			if (i % 6 == 0) {
				qib_write_kreg_port(ppd, regno, val);
				val = 0;
				regno++;
			}
		}
		qib_write_kreg_port(ppd, regno, val);
	}
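
	/*
	 * Worked example of the QP-map packing above (a sketch with
	 * hypothetical context counts): each rcvqpmaptable register
	 * packs six 5-bit context numbers. With 2 ports and, say,
	 * first_user_ctxt = 6, n = 3, so port 0 (pidx = 0) cycles the
	 * QP map through kernel contexts 0, 2, 4 and port 1 through
	 * 1, 3, 5, six entries per register write.
	 */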
	/*
	 * Set up interrupt mitigation for kernel contexts, but
	 * not user contexts (user contexts use interrupts when
	 * stalled waiting for any packet, so want those interrupts
	 * right away).
	 */
	for (i = 0; i < dd->first_user_ctxt; i++) {
		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
	}

	/*
	 * Initialize as (disabled) rcvflow tables. Application code
	 * will set up each flow as it uses the flow.
	 * Doesn't clear any of the error bits that might be set.
	 */
	val = TIDFLOW_ERRBITS; /* these are W1C */
	for (i = 0; i < dd->cfgctxts; i++) {
		int flow;

		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
	}

	/*
	 * Dual cards init to dual port recovery, single port cards to
	 * the one port. Dual port cards may later adjust to 1 port,
	 * and then back to dual port if both ports are connected.
	 */
	if (dd->num_pports)
		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
}
static int qib_init_7322_variables(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned features, pidx, sbufcnt;
	int ret, mtu;
	u32 sbufs, updthresh;

	/* pport structs are contiguous, allocated after devdata */
	ppd = (struct qib_pportdata *)(dd + 1);
	dd->pport = ppd;
	ppd[0].dd = dd;
	ppd[1].dd = dd;

	dd->cspec = (struct qib_chip_specific *)(ppd + 2);

	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
	ppd[1].cpspec = &ppd[0].cpspec[1];
	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */

	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd,
			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;	/* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
	dd->cspec->r1 = dd->minrev == 1;

	get_7322_chip_params(dd);
	features = qib_7322_boardname(dd);

	/* now that piobcnt2k and 4k set, we can allocate these */
	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
		NUM_VL15_BUFS + BITS_PER_LONG - 1;
	sbufcnt /= BITS_PER_LONG;
	dd->cspec->sendchkenable = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
	dd->cspec->sendibchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
		!dd->cspec->sendibchk) {
		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
		ret = -ENOMEM;
		goto bail;
	}

	ppd = dd->pport;

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
		QIB_HAS_THRESH_UPDATE |
		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * Setup initial values. These may change when PAT is enabled, but
	 * we need these to do initial chip register accesses.
	 */
	qib_7322_set_baseaddrs(dd);

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;

	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
	/* all hwerrors become interrupts, unless special purposed */
	dd->cspec->hwerrmask = ~0ULL;
	/* link_recovery setup causes these errors, so ignore them,
	 * other than clearing them when they occur */
	dd->cspec->hwerrmask &=
		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
		  HWE_MASK(LATriggered));

	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
		struct qib_chippport_specific *cp = ppd->cpspec;

		ppd->link_speed_supported = features & PORT_SPD_CAP;
		features >>= PORT_SPD_CAP_SHIFT;
		if (!ppd->link_speed_supported) {
			/* single port mode (7340, or configured) */
			dd->skip_kctxt_mask |= 1 << pidx;
			if (pidx == 0) {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				ppd[0] = ppd[1];
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_0)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_0));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
				     SYM_MASK(IntMask, SDmaIntMask_0) |
				     SYM_MASK(IntMask, ErrIntMask_0) |
				     SYM_MASK(IntMask, SendDoneIntMask_0));
			} else {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_1)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_1));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
				     SYM_MASK(IntMask, SDmaIntMask_1) |
				     SYM_MASK(IntMask, ErrIntMask_1) |
				     SYM_MASK(IntMask, SendDoneIntMask_1));
			}
			continue;
		}

		dd->num_pports++;
		qib_init_pportdata(ppd, dd, pidx, dd->num_pports);

		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
		ppd->link_width_enabled = IB_WIDTH_4X;
		ppd->link_speed_enabled = ppd->link_speed_supported;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->link_width_active = IB_WIDTH_4X;
		ppd->link_speed_active = QIB_IB_SDR;
		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
		switch (qib_num_cfg_vls) {
		case 1:
			ppd->vls_supported = IB_VL_VL0;
			break;
		case 2:
			ppd->vls_supported = IB_VL_VL0_1;
			break;
		default:
			qib_devinfo(dd->pcidev,
				    "Invalid num_vls %u, using 4 VLs\n",
				    qib_num_cfg_vls);
			qib_num_cfg_vls = 4;
			/* fall through */
		case 4:
			ppd->vls_supported = IB_VL_VL0_3;
			break;
		case 8:
			if (mtu <= 2048)
				ppd->vls_supported = IB_VL_VL0_7;
			else {
				qib_devinfo(dd->pcidev,
					    "Invalid num_vls %u for MTU %d, using 4 VLs\n",
					    qib_num_cfg_vls, mtu);
				ppd->vls_supported = IB_VL_VL0_3;
				qib_num_cfg_vls = 4;
			}
			break;
		}
		ppd->vls_operational = ppd->vls_supported;

		init_waitqueue_head(&cp->autoneg_wait);
		INIT_DELAYED_WORK(&cp->autoneg_work,
				  autoneg_7322_work);
		if (ppd->dd->cspec->r1)
			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);

		/*
		 * For Mez and similar cards, no qsfp info, so do
		 * the "cable info" setup here. Can be overridden
		 * in adapter-specific routines.
		 */
		if (!(dd->flags & QIB_HAS_QSFP)) {
			if (!IS_QMH(dd) && !IS_QME(dd))
				qib_devinfo(dd->pcidev,
					"IB%u:%u: Unknown mezzanine card type\n",
					dd->unit, ppd->port);
			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
			/*
			 * Choose center value as default tx serdes setting
			 * until changed through module parameter.
			 */
			ppd->cpspec->no_eep = IS_QMH(dd) ?
				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
		} else
			cp->h1_val = H1_FORCE_VAL;

		/* Avoid writes to chip for mini_init */
		if (!qib_mini_init)
			write_7322_init_portregs(ppd);

		init_timer(&cp->chase_timer);
		cp->chase_timer.function = reenable_chase;
		cp->chase_timer.data = (unsigned long)ppd;

		ppd++;
	}

	dd->rcvhdrentsize = qib_rcvhdrentsize ?
		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = qib_rcvhdrsize ?
		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	dd->rcvegrbufsize = max(mtu, 2048);
	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);

	qib_7322_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.
	 */
	dd->rhdrhead_intr_off =
		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;

	/* setup the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7322_faststats;
	dd->stats_timer.data = (unsigned long) dd;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = dd->piosize2k >> 2;

	qib_7322_config_ctxts(dd);
	qib_set_ctxtcnt(dd);

	if (qib_wc_pat) {
		resource_size_t vl15off;
		/*
		 * We do not set WC on the VL15 buffers to avoid
		 * a rare problem with unaligned writes from
		 * interrupt-flushed store buffers, so we need
		 * to map those separately here. We can't solve
		 * this for the rarely used mtrr case.
		 */
		ret = init_chip_wc_pat(dd, 0);
		if (ret)
			goto bail;

		/* vl15 buffers start just after the 4k buffers */
		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
			dd->piobcnt4k * dd->align4k;
		dd->piovl15base	= ioremap_nocache(vl15off,
						  NUM_VL15_BUFS * dd->align4k);
		if (!dd->piovl15base) {
			ret = -ENOMEM;
			goto bail;
		}
	}
	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;
	if (!dd->num_pports) {
		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
		goto bail; /* no error, so can still figure out why err */
	}

	write_7322_initregs(dd);
	ret = qib_create_ctxts(dd);
	init_7322_cntrnames(dd);

	updthresh = 8U; /* update threshold */

	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * if this was less than the update threshold, we could wait
	 * a long time for an update. Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}
	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->last_pio = dd->cspec->lastbuf_for_pio;
	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
		dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt) : 0;

	/*
	 * If we have 16 user contexts, we will have 7 sbufs
	 * per context, so reduce the update threshold to match. We
	 * want to update before we actually run out, at low pbufs/ctxt
	 * so give ourselves some margin.
	 */
	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
		updthresh = dd->pbufsctxt - 2;
	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
bail:
	if (!dd->ctxtcnt)
		dd->ctxtcnt = 1; /* for other initialization code */

	return ret;
}
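
/*
 * Example of the send-buffer carve-up above (hypothetical counts,
 * only to illustrate the arithmetic): with piobcnt2k = 128,
 * piobcnt4k = 144, and SDMA enabled, sdmabufcnt = 144 and sbufs = 8,
 * so lastctxt_piobuf = 128 + 144 - 144 - 8 = 120 buffers are left
 * for user contexts, and lastbuf_for_pio becomes 127 after the
 * decrement (the range is inclusive).
 */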
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;

	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
	if (pbc & PBC_7322_VL15_SEND) {
		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
		last = first;
	} else {
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		last = dd->cspec->lastbuf_for_pio;
	}
	return qib_getsendbuf_range(dd, pbufnum, first, last);
}
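
/*
 * Buffer-range selection in qib_7322_getsendbuf(), in words (a
 * summary of the code above): VL15 sends get the single dedicated
 * per-port buffer just past the 2k and 4k pools; anything too long
 * for a 2k buffer searches only the 4k pool (first = piobcnt2k);
 * everything else may use either pool, from 0 through
 * lastbuf_for_pio.
 */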
static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	qib_write_kreg_port(ppd, krp_psinterval, intv);
	qib_write_kreg_port(ppd, krp_psstart, start);
}

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
}

/*
 * sdma_lock should be acquired before calling this routine
 */
static void dump_sdma_7322_state(struct qib_pportdata *ppd)
{
	u64 reg, reg1, reg2;

	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmastatus: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_sendctrl);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA sendctrl: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmabase);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmabase: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);

	/*
	 * get bufuse bits, clear them, and print them again if non-zero.
	 * Write each value back to the register it was read from; the
	 * original code wrote all three to senddmabuf_use0, which looks
	 * like a copy-paste error.
	 */
	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
	/* 0 and 1 should always be zero, so print as short form */
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);
	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	/* 0 and 1 should always be zero, so print as short form */
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);

	reg = qib_read_kreg_port(ppd, krp_senddmatail);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmatail: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmahead);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmahead: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmaheadaddr: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmalengen: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmadesccnt: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmaidlecnt: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmaprioritythld: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmareloadcnt: 0x%016llx\n", reg);

	dump_sdma_state(ppd);
}

static struct sdma_set_state_action sdma_7322_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_drain = 0,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_drain = 0,
		.go_s99_running_totrue = 1,
	},
};

static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7322_action_table;
}
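
/*
 * Program the per-port SDMA registers: descriptor queue base, tail,
 * head address, and the bitmask of PIO buffers reserved for SDMA.
 * Each port gets an equal share of the SDMA buffers; port 2's share
 * (or a single port's) sits at the top of the 2k+4k buffer space,
 * with port 1's share just below it.
 */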
static int init_sdma_7322_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned lastbuf, erstbuf;
	u64 senddmabufmask[3] = { 0 };
	int n, ret = 0;

	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);

	if (dd->num_pports)
		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
	else
		n = dd->cspec->sdmabufcnt; /* failsafe for init */
	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
		((dd->num_pports == 1 || ppd->port == 2) ? n :
		dd->cspec->sdmabufcnt);
	lastbuf = erstbuf + n;

	ppd->sdma_state.first_sendbuf = erstbuf;
	ppd->sdma_state.last_sendbuf = lastbuf;
	for (; erstbuf < lastbuf; ++erstbuf) {
		unsigned word = erstbuf / BITS_PER_LONG;
		unsigned bit = erstbuf & (BITS_PER_LONG - 1);

		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
	return ret;
}

/* sdma_lock must be held */
static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
		(u16) qib_read_kreg_port(ppd, krp_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	if (swhead < swtail)
		/* not wrapped */
		sane = (hwhead >= swhead) && (hwhead <= swtail);
	else if (swhead > swtail)
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	else
		/* empty */
		sane = (hwhead == swhead);

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* proceed as if no progress */
		hwhead = swhead;
	}

	return hwhead;
}
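
/*
 * Report whether the SDMA engine still has work in flight: busy while
 * a scoreboard drain or halt is in progress, while the engine has not
 * reached internal halt, or while the scoreboard is not yet empty.
 */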
static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
{
	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);

	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
}

/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * The delay affects the next packet and the amount of the delay is
 * based on the length of this packet.
 */
static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	u8 snd_mult = ppd->delay_mult;
	u8 rcv_mult = ib_rate_to_delay[srate];
	u32 ret;

	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;

	/* Indicate VL15, else set the VL in the control word */
	if (vl == 15)
		ret |= PBC_7322_VL15_SEND_CTRL;
	else
		ret |= vl << PBC_VL_NUM_LSB;

	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
	return ret;
}

/*
 * Enable the per-port VL15 send buffers for use.
 * They follow the rest of the buffers, without a config parameter.
 * This was in initregs, but that is done before the shadow
 * is set up, and this has to be done after the shadow is
 * set up.
 */
static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
{
	unsigned vl15bufs;

	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
			       TXCHK_CHG_TYPE_KERN, NULL);
}
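
/*
 * Set up the eager-buffer count and TID base for a receive context.
 * The kernel contexts (one per IB port) split KCTXT0_EGRCNT between
 * them on dual-port boards; user contexts follow, each taking the
 * configured per-context eager count.
 */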
static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
{
	if (rcd->ctxt < NUM_IB_PORTS) {
		if (rcd->dd->num_pports > 1) {
			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
		} else {
			rcd->rcvegrcnt = KCTXT0_EGRCNT;
			rcd->rcvegr_tid_base = 0;
		}
	} else {
		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
	}
}

#define QTXSLEEPS 5000
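/*
 * Change the PIO-buffer checking mode for a range of send buffers
 * (kernel vs. user use, or diag enable/disable). When a context is
 * supplied, first wait (up to QTXSLEEPS iterations of 20ms) for any
 * buffers still allocated to the driver to drain, then update the
 * shadow check masks and push them to the chip.
 */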
static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
	int i;
	const int last = start + len - 1;
	const int lastr = last / BITS_PER_LONG;
	u32 sleeps = 0;
	int wait = rcd != NULL;
	unsigned long flags;

	while (wait) {
		unsigned long shadow;
		int cstart, previ = -1;

		/*
		 * when flipping from kernel to user, we can't change
		 * the checking type if the buffer is allocated to the
		 * driver.   It's OK the other direction, because it's
		 * from close, and we have just disarm'ed all the
		 * buffers.  All the kernel to kernel changes are also
		 * OK.
		 */
		for (cstart = start; cstart <= last; cstart++) {
			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
			    / BITS_PER_LONG;
			if (i != previ) {
				shadow = (unsigned long)
					le64_to_cpu(dd->pioavailregs_dma[i]);
				previ = i;
			}
			if (test_bit(((2 * cstart) +
				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
				     % BITS_PER_LONG, &shadow))
				break;
		}

		if (cstart > last)
			break;

		if (sleeps == QTXSLEEPS)
			break;
		/* make sure we see an updated copy next time around */
		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		sleeps++;
		msleep(20);
	}

	switch (which) {
	case TXCHK_CHG_TYPE_DIS1:
		/*
		 * disable checking on a range; used by diags; just
		 * one buffer, but still written generically
		 */
		for (i = start; i <= last; i++)
			clear_bit(i, dd->cspec->sendchkenable);
		break;

	case TXCHK_CHG_TYPE_ENAB1:
		/*
		 * (re)enable checking on a range; used by diags; just
		 * one buffer, but still written generically; read
		 * scratch to be sure buffer actually triggered, not
		 * just flushed from processor.
		 */
		qib_read_kreg32(dd, kr_scratch);
		for (i = start; i <= last; i++)
			set_bit(i, dd->cspec->sendchkenable);
		break;

	case TXCHK_CHG_TYPE_KERN:
		/* usable by kernel */
		for (i = start; i <= last; i++) {
			set_bit(i, dd->cspec->sendibchk);
			clear_bit(i, dd->cspec->sendgrhchk);
		}
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		/* see if we need to raise avail update threshold */
		for (i = dd->first_user_ctxt;
		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
		     && i < dd->cfgctxts; i++)
			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
			   < dd->cspec->updthresh_dflt)
				break;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		if (i == dd->cfgctxts) {
			spin_lock_irqsave(&dd->sendctrl_lock, flags);
			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
					SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		}
		break;

	case TXCHK_CHG_TYPE_USER:
		/* for user process */
		for (i = start; i <= last; i++) {
			clear_bit(i, dd->cspec->sendibchk);
			set_bit(i, dd->cspec->sendgrhchk);
		}
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
			dd->cspec->updthresh = (rcd->piocnt /
						rcd->subctxt_cnt) - 1;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		} else
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		break;

	default:
		break;
	}

	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
		qib_write_kreg(dd, kr_sendcheckmask + i,
			       dd->cspec->sendchkenable[i]);

	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
			       dd->cspec->sendgrhchk[i]);
		qib_write_kreg(dd, kr_sendibpktmask + i,
			       dd->cspec->sendibchk[i]);
	}

	/*
	 * Be sure whatever we did was seen by the chip and acted upon,
	 * before we return.  Mostly important for which >= 2.
	 */
	qib_read_kreg32(dd, kr_scratch);
}

/* useful for trigger analyzers, etc. */
static void writescratch(struct qib_devdata *dd, u32 val)
{
	qib_write_kreg(dd, kr_scratch, val);
}

/* Dummy for now, use chip regs soon */
static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	return -ENXIO;
}

/**
 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, inits, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct qib_devdata *dd;
	int ret, i;
	u32 tabsize, actual_cnt = 0;

	dd = qib_alloc_devdata(pdev,
		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
		sizeof(struct qib_chip_specific) +
		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
	if (IS_ERR(dd))
		goto bail;

	dd->f_bringup_serdes = qib_7322_bringup_serdes;
	dd->f_cleanup = qib_setup_7322_cleanup;
	dd->f_clear_tids = qib_7322_clear_tids;
	dd->f_free_irq = qib_7322_free_irq;
	dd->f_get_base_info = qib_7322_get_base_info;
	dd->f_get_msgheader = qib_7322_get_msgheader;
	dd->f_getsendbuf = qib_7322_getsendbuf;
	dd->f_gpio_mod = gpio_7322_mod;
	dd->f_eeprom_wen = qib_7322_eeprom_wen;
	dd->f_hdrqempty = qib_7322_hdrqempty;
	dd->f_ib_updown = qib_7322_ib_updown;
	dd->f_init_ctxt = qib_7322_init_ctxt;
	dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
	dd->f_intr_fallback = qib_7322_intr_fallback;
	dd->f_late_initreg = qib_late_7322_initreg;
	dd->f_setpbc_control = qib_7322_setpbc_control;
	dd->f_portcntr = qib_portcntr_7322;
	dd->f_put_tid = qib_7322_put_tid;
	dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
	dd->f_rcvctrl = rcvctrl_7322_mod;
	dd->f_read_cntrs = qib_read_7322cntrs;
	dd->f_read_portcntrs = qib_read_7322portcntrs;
	dd->f_reset = qib_do_7322_reset;
	dd->f_init_sdma_regs = init_sdma_7322_regs;
	dd->f_sdma_busy = qib_sdma_7322_busy;
	dd->f_sdma_gethead = qib_sdma_7322_gethead;
	dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
	dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
	dd->f_sendctrl = sendctrl_7322_mod;
	dd->f_set_armlaunch = qib_set_7322_armlaunch;
	dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
	dd->f_iblink_state = qib_7322_iblink_state;
	dd->f_ibphys_portstate = qib_7322_phys_portstate;
	dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
	dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
	dd->f_set_ib_loopback = qib_7322_set_loopback;
	dd->f_get_ib_table = qib_7322_get_ib_table;
	dd->f_set_ib_table = qib_7322_set_ib_table;
	dd->f_set_intr_state = qib_7322_set_intr_state;
	dd->f_setextled = qib_setup_7322_setextled;
	dd->f_txchk_change = qib_7322_txchk_change;
	dd->f_update_usrhead = qib_update_7322_usrhead;
	dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
	dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
	dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
	dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
	dd->f_sdma_init_early = qib_7322_sdma_init_early;
	dd->f_writescratch = writescratch;
	dd->f_tempsense_rd = qib_7322_tempsense_rd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dd->f_notify_dca = qib_7322_notify_dca;
#endif
	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped, but chip registers
	 * are not set up until start of qib_init_7322_variables.
	 */
	ret = qib_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* initialize chip-specific variables */
	ret = qib_init_7322_variables(dd);
	if (ret)
		goto bail_cleanup;

	if (qib_mini_init || !dd->num_pports)
		goto bail;

	/*
	 * Determine number of vectors we want; depends on port count
	 * and number of configured kernel receive queues actually used.
	 * Should also depend on whether sdma is enabled or not, but
	 * that's such a rare testing case it's not worth worrying about.
	 */
	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
	for (i = 0; i < tabsize; i++)
		if ((i < ARRAY_SIZE(irq_table) &&
		     irq_table[i].port <= dd->num_pports) ||
		    (i >= ARRAY_SIZE(irq_table) &&
		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
			actual_cnt++;
	/* reduce by ctxt's < 2 */
	if (qib_krcvq01_no_msi)
		actual_cnt -= dd->num_pports;

	tabsize = actual_cnt;
	dd->cspec->msix_entries = kzalloc(tabsize *
			sizeof(struct qib_msix_entry), GFP_KERNEL);
	if (!dd->cspec->msix_entries) {
		qib_dev_err(dd, "No memory for MSIx table\n");
		tabsize = 0;
	}
	for (i = 0; i < tabsize; i++)
		dd->cspec->msix_entries[i].msix.entry = i;

	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
		qib_dev_err(dd,
			"Failed to setup PCIe or interrupts; continuing anyway\n");
	/* may be less than we wanted, if not enough available */
	dd->cspec->num_msix_entries = tabsize;

	/* setup interrupt handler */
	qib_setup_7322_interrupt(dd, 1);

	/* clear diagctrl register, in case diags were running and crashed */
	qib_write_kreg(dd, kr_hwdiagctrl, 0);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	if (!dca_add_requester(&pdev->dev)) {
		qib_devinfo(dd->pcidev, "DCA enabled\n");
		dd->flags |= QIB_DCA_ENABLED;
		qib_setup_dca(dd);
	}
#endif

	goto bail;

bail_cleanup:
	qib_pcie_ddcleanup(dd);
bail_free:
	qib_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}

/*
 * Set the table entry at the specified index from the table specified.
 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
 * 'idx' below addresses the correct entry, while its 4 LSBs select the
 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
 */
#define DDS_ENT_AMP_LSB 14
#define DDS_ENT_MAIN_LSB 9
#define DDS_ENT_POST_LSB 5
#define DDS_ENT_PRE_XTRA_LSB 3
#define DDS_ENT_PRE_LSB 0

/*
 * Set one entry in the TxDDS table for spec'd port
 * ridx picks one of the entries, while tp points
 * to the appropriate table entry.
 */
static void set_txdds(struct qib_pportdata *ppd, int ridx,
		      const struct txdds_ent *tp)
{
	struct qib_devdata *dd = ppd->dd;
	u32 pack_ent;
	int regidx;

	/* Get correct offset in chip-space, and in source table */
	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
	/*
	 * We do not use qib_write_kreg_port() because it was intended
	 * only for registers in the lower "port specific" pages.
	 * So do index calculation by hand.
	 */
	if (ppd->hw_pidx)
		regidx += (dd->palign / sizeof(u64));

	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
	pack_ent |= tp->post << DDS_ENT_POST_LSB;
	qib_write_kreg(dd, regidx, pack_ent);
	/* Prevent back-to-back writes by hitting scratch */
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}

static const struct vendor_txdds_ent vendor_txdds[] = {
	{ /* Amphenol 1m 30awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470002       ",
		{ 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
	},
	{ /* Amphenol 3m 28awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470004       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
	},
	{ /* Finisar 3m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
		{ 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
	},
	{ /* Finisar 30m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
		{ 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
	},
	{ /* Finisar Default OM2 Optical */
		{ 0x00, 0x90, 0x65 }, NULL,
		{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
	},
	{ /* Gore 1m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
	},
	{ /* Gore 2m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
	},
	{ /* Gore 1m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
	},
	{ /* Gore 3m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
		{ 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
	},
	{ /* Gore 5m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
		{ 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
	},
	{ /* Gore 7m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
		{ 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
	},
	{ /* Gore 5m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
	},
	{ /* Gore 7m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
	},
	{ /* Intersil 12m 24awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
		{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
	},
	{ /* Intersil 10m 28awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
	},
	{ /* Intersil 7m 30awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
	},
	{ /* Intersil 5m 32awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
	},
	{ /* Intersil Default Active */
		{ 0x00, 0x30, 0xB4 }, NULL,
		{ 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
	},
	{ /* Luxtera 20m Active Optical */
		{ 0x00, 0x25, 0x63 }, NULL,
		{ 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
	},
	{ /* Molex 1M Cu loopback */
		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
		{ 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
	},
	{ /* Molex 2m 28awg NoEq */
		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
	},
};

static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 0, 0, 1 },		/*  2 dB */
	{ 0, 0, 0, 2 },		/*  3 dB */
	{ 0, 0, 0, 3 },		/*  4 dB */
	{ 0, 0, 0, 4 },		/*  5 dB */
	{ 0, 0, 0, 5 },		/*  6 dB */
	{ 0, 0, 0, 6 },		/*  7 dB */
	{ 0, 0, 0, 7 },		/*  8 dB */
	{ 0, 0, 0, 8 },		/*  9 dB */
	{ 0, 0, 0, 9 },		/* 10 dB */
	{ 0, 0, 0, 10 },	/* 11 dB */
	{ 0, 0, 0, 11 },	/* 12 dB */
	{ 0, 0, 0, 12 },	/* 13 dB */
	{ 0, 0, 0, 13 },	/* 14 dB */
	{ 0, 0, 0, 14 },	/* 15 dB */
	{ 0, 0, 0, 15 },	/* 16 dB */
};

static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 0, 0, 8 },		/*  2 dB */
	{ 0, 0, 0, 8 },		/*  3 dB */
	{ 0, 0, 0, 9 },		/*  4 dB */
	{ 0, 0, 0, 9 },		/*  5 dB */
	{ 0, 0, 0, 10 },	/*  6 dB */
	{ 0, 0, 0, 10 },	/*  7 dB */
	{ 0, 0, 0, 11 },	/*  8 dB */
	{ 0, 0, 0, 11 },	/*  9 dB */
	{ 0, 0, 0, 12 },	/* 10 dB */
	{ 0, 0, 0, 12 },	/* 11 dB */
	{ 0, 0, 0, 13 },	/* 12 dB */
	{ 0, 0, 0, 13 },	/* 13 dB */
	{ 0, 0, 0, 14 },	/* 14 dB */
	{ 0, 0, 0, 14 },	/* 15 dB */
	{ 0, 0, 0, 15 },	/* 16 dB */
};

static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 1, 0, 7 },		/*  2 dB (also QMH7342) */
	{ 0, 1, 0, 9 },		/*  3 dB (also QMH7342) */
	{ 0, 1, 0, 11 },	/*  4 dB */
	{ 0, 1, 0, 13 },	/*  5 dB */
	{ 0, 1, 0, 15 },	/*  6 dB */
	{ 0, 1, 3, 15 },	/*  7 dB */
	{ 0, 1, 7, 15 },	/*  8 dB */
	{ 0, 1, 7, 15 },	/*  9 dB */
	{ 0, 1, 8, 15 },	/* 10 dB */
	{ 0, 1, 9, 15 },	/* 11 dB */
	{ 0, 1, 10, 15 },	/* 12 dB */
	{ 0, 2, 6, 15 },	/* 13 dB */
	{ 0, 2, 7, 15 },	/* 14 dB */
	{ 0, 2, 8, 15 },	/* 15 dB */
	{ 0, 2, 9, 15 },	/* 16 dB */
};

/*
 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
 * These are mostly used for mez cards going through connectors
 * and backplane traces, but can be used to add other "unusual"
 * table values as well.
 */
static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 1 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 1 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 2 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 2 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 3 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 4 },		/* QMH7342 backplane settings */
	{ 0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 7 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 6 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 8 },		/* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 7 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 7 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 9 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 10 },	/* QMH7342 backplane settings */
	{ 0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 7 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 6 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 8 },		/* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 1, 0, 4 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 5 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 6 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 10 },	/* QMH7342 backplane settings */
	{ 0, 1, 0, 12 },	/* QMH7342 backplane settings */
	{ 0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 7 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 6 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 8 },		/* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 0 },		/* QME7342 mfg settings */
	{ 0, 0, 0, 6 },		/* QME7342 P2 mfg settings */
};

static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
					       unsigned atten)
{
	/*
	 * The attenuation table starts at 2dB for entry 1,
	 * with entry 0 being the loopback entry.
	 */
	if (atten <= 2)
		atten = 1;
	else if (atten > TXDDS_TABLE_SZ)
		atten = TXDDS_TABLE_SZ - 1;
	else
		atten--;
	return txdds + atten;
}

/*
 * If override is set, the module parameter txselect has a value
 * for this specific port, so use it, rather than our normal mechanism.
 */
static void find_best_ent(struct qib_pportdata *ppd,
			  const struct txdds_ent **sdr_dds,
			  const struct txdds_ent **ddr_dds,
			  const struct txdds_ent **qdr_dds, int override)
{
	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
	int idx;

	/* Search table of known cables */
	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
		const struct vendor_txdds_ent *v = vendor_txdds + idx;

		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
		    (!v->partnum ||
		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
			*sdr_dds = &v->sdr;
			*ddr_dds = &v->ddr;
			*qdr_dds = &v->qdr;
			return;
		}
	}

	/*
	 * Active cables don't have attenuation so we only set SERDES
	 * settings to account for the attenuation of the board traces.
	 */
	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
		return;
	}

	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
						      qd->atten[1])) {
		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
		return;
	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
		/*
		 * If we have no (or incomplete) data from the cable
		 * EEPROM, or no QSFP, or override is set, use the
		 * module parameter value to index into the attenuation
		 * table.
		 */
		idx = ppd->cpspec->no_eep;
		*sdr_dds = &txdds_sdr[idx];
		*ddr_dds = &txdds_ddr[idx];
		*qdr_dds = &txdds_qdr[idx];
	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
		/* similar to above, but index into the "extra" table. */
		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
		*sdr_dds = &txdds_extra_sdr[idx];
		*ddr_dds = &txdds_extra_ddr[idx];
		*qdr_dds = &txdds_extra_qdr[idx];
	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
					  TXDDS_MFG_SZ)) {
		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
			ppd->dd->unit, ppd->port, idx);
		*sdr_dds = &txdds_extra_mfg[idx];
		*ddr_dds = &txdds_extra_mfg[idx];
		*qdr_dds = &txdds_extra_mfg[idx];
	} else {
		/* this shouldn't happen, it's range checked */
		*sdr_dds = txdds_sdr + qib_long_atten;
		*ddr_dds = txdds_ddr + qib_long_atten;
		*qdr_dds = txdds_qdr + qib_long_atten;
	}
}

static void init_txdds_table(struct qib_pportdata *ppd, int override)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;
	int idx;
	int single_ent = 0;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);

	/* for mez cards or override, use the selected value for all entries */
	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
		single_ent = 1;

	/* Fill in the first entry with the best entry found. */
	set_txdds(ppd, 0, sdr_dds);
	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
			   QIBL_LINKACTIVE)) {
		dds = (struct txdds_ent *)(ppd->link_speed_active ==
					   QIB_IB_QDR ? qdr_dds :
					   (ppd->link_speed_active ==
					    QIB_IB_DDR ? ddr_dds : sdr_dds));
		write_tx_serdes_param(ppd, dds);
	}

	/* Fill in the remaining entries with the default table values. */
	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
			  single_ent ? ddr_dds : txdds_ddr + idx);
		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
			  single_ent ? qdr_dds : txdds_qdr + idx);
	}
}

#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
#define AHB_TRANS_TRIES 10

/*
 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
 * 5=subsystem, which is why most calls have "(chan + (chan >> 1))"
 * for the channel argument.
 */
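/*
 * Read-modify-write a SerDes register through the AHB interface:
 * read the old value when the mask isn't all ones, merge in the
 * masked data, write back when the mask is non-zero, and return the
 * value written (or 0xBAD0BAD on timeout waiting for ahb_rdy).
 */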
static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
		   u32 data, u32 mask)
{
	u32 rd_data, wr_data, sz_mask;
	u64 trans, acc, prev_acc;
	u32 ret = 0xBAD0BAD;
	int tries;

	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
	/* From this point on, make sure we return access */
	acc = (quad << 1) | 1;
	qib_write_kreg(dd, KR_AHB_ACC, acc);

	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		if (trans & AHB_TRANS_RDY)
			break;
	}
	if (tries >= AHB_TRANS_TRIES) {
		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
		goto bail;
	}

	/* If mask is not all 1s, we need to read, but different SerDes
	 * entities have different sizes
	 */
	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
	wr_data = data & mask & sz_mask;
	if ((~mask & sz_mask) != 0) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
		/* Re-read in case host split reads and read data first */
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
		wr_data |= (rd_data & ~mask & sz_mask);
	}

	/* If mask is not zero, we need to write. */
	if (mask & sz_mask) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
		trans |= AHB_WR;
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
	}
	ret = wr_data;
bail:
	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
	return ret;
}
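
/*
 * Apply the same masked write to the given SerDes address on every
 * channel of the port, reading each back to flush the write.
 */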
static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
			     unsigned mask)
{
	struct qib_devdata *dd = ppd->dd;
	int chan;
	u32 rbc;

	for (chan = 0; chan < SERDES_CHANS; ++chan) {
		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
			data, mask);
		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			      addr, 0, 0);
	}
}
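
/*
 * Turn the receive Loss-of-Signal detector on or off, logging only
 * actual state changes; the control register is rewritten either way.
 */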
static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);

	if (enable && !state) {
		pr_info("IB%u:%u Turning LOS on\n",
			ppd->dd->unit, ppd->port);
		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	} else if (!enable && state) {
		pr_info("IB%u:%u Turning LOS off\n",
			ppd->dd->unit, ppd->port);
		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	}
	qib_write_kreg_port(ppd, krp_serdesctrl, data);
}

static int serdes_7322_init(struct qib_pportdata *ppd)
{
	int ret = 0;

	if (ppd->dd->cspec->r1)
		ret = serdes_7322_init_old(ppd);
	else
		ret = serdes_7322_init_new(ppd);
	return ret;
}

static int serdes_7322_init_old(struct qib_pportdata *ppd)
{
	u32 le_val;

	/*
	 * Initialize the Tx DDS tables.  Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	/* Patch some SerDes defaults to "Better for IB" */
	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));

	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));

	/* May be overridden in qsfp_7322_event */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));

	/* enable LE1 adaptation for all but QME, which is disabled */
	le_val = IS_QME(ppd->dd) ? 0 : 1;
	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data: SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	serdes_7322_los_enable(ppd, 1);

	/* rxbistena; set to 0 to avoid effects of it switching later */
	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);

	/* Configure 4 DFE taps, and only they adapt */
	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));

	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);

	/*
	 * Set receive adaptation mode.  SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;

	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
	/* rx offset center enabled */
	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);

	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
	}

	/* Set the frequency loop bandwidth to 15 */
	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));

	return 0;
}

static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
	unsigned long tend;
	u32 le_val, rxcaldone;
	int chan, chan_done = (1 << SERDES_CHANS) - 1;

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	/* START OF LSI SUGGESTED SERDES BRINGUP */
	/* Reset - Calibration Setup */
	/* Stop DFE adaptation */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
	/* Disable LE1 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
	/* Disable autoadapt for LE1 */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
	/* Disable LE2 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
	/* Disable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	/* Disable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
	/* Disable Timing Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
	/* Disable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
	/* Disable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
	/* Disable RX Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	/* Disable RX Offset Calibration */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
	/* Select BB CDR */
	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
	/* CDR Step Size */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
	/* Enable phase Calibration */
	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
	/* DFE Bandwidth [2:14-12] */
	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
	/* DFE Config (4 taps only) */
	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
	/* Gain Loop Bandwidth */
	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
	} else {
		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
	}
	/* Baseline Wander Correction Gain [13:4-0] (leave as default) */
	/* Baseline Wander Correction Gain [3:7-5] (leave as default) */
	/* Data Rate Select [5:7-6] (leave as default) */
	/* RX Parallel Word Width [3:10-8] (leave as default) */

	/* RX RESET */
	/* Single- or Multi-channel reset */
	/* RX Analog reset */
	/* RX Digital reset */
	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
	msleep(20);
	/* RX Analog reset */
	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
	msleep(20);
	/* RX Digital reset */
	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
	msleep(20);

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data: SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	/* Turn on LOS on initial SERDES init */
	serdes_7322_los_enable(ppd, 1);
	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);

	/* RX LATCH CALIBRATION */
	/* Enable Eyefinder Phase Calibration latch */
	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
	/* Enable RX Offset Calibration latch */
	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
	msleep(20);
	/* Start Calibration */
	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
	tend = jiffies + msecs_to_jiffies(500);
	while (chan_done && !time_is_before_jiffies(tend)) {
		msleep(20);
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
			    (~chan_done & (1 << chan)) == 0)
				chan_done &= ~(1 << chan);
		}
	}
	if (chan_done) {
		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
			IBSD(ppd->hw_pidx), chan_done);
	} else {
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
				pr_info("Serdes %d chan %d calibration failed\n",
					IBSD(ppd->hw_pidx), chan);
		}
	}

	/* Turn off Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	msleep(20);

	/* BRING RX UP */
	/* Set LE2 value (May be overridden in qsfp_7322_event) */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
	/* Set LE2 Loop bandwidth */
	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
	/* Enable LE2 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
	msleep(20);
	/* Enable H0 only */
	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
	/* Enable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	msleep(20);
	/* Set Frequency Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
	/* Enable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
	/* Set Timing Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
	/* Enable Timing Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
	msleep(50);
	/*
	 * Enable DFE.
	 * Set receive adaptation mode.  SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;
	/* Disable LE1 */
	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
	/* Disable auto adapt for LE1 */
	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
	msleep(20);
	/* Enable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
	/* Enable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* VGA output common mode */
	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));

	/*
	 * Initialize the Tx DDS tables.  Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	return 0;
}

/* start adjust QMH serdes parameters */

static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
{
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		9, code << 9, 0x3f << 9);
}

static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
			    int enable, u32 tapenable)
{
	if (enable)
		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			1, 3 << 10, 0x1f << 10);
	else
		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			1, 0, 0x1f << 10);
}

/* Set clock to 1, 0, 1, 0 */
static void clock_man(struct qib_pportdata *ppd, int chan)
{
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0x4000, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0x4000, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0, 0x4000);
}

/*
 * Write the current Tx serdes pre, post, main, amp settings into the serdes.
 * The caller must pass the settings appropriate for the current speed,
 * or not care if they are correct for the current speed.
 */
static void write_tx_serdes_param(struct qib_pportdata *ppd,
				  struct txdds_ent *txdds)
{
	u64 deemph;

	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
	/* field names for amp, main, post, pre, respectively */
	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));

	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
			   tx_override_deemphasis_select);
	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					      txampcntl_d2a);
	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					 txc0_ena);
	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					  txcp1_ena);
	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					  txcn1_ena);
	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
}

/*
 * Set the parameters for mez cards on link bounce, so they are
 * always exactly what was requested.  Similar logic to init_txdds
 * but does just the serdes.
 */
static void adj_tx_serdes(struct qib_pportdata *ppd)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
			   ddr_dds : sdr_dds));
	write_tx_serdes_param(ppd, dds);
}

/* set QDR forced value for H1, if needed */
static void force_h1(struct qib_pportdata *ppd)
{
	int chan;

	ppd->cpspec->qdr_reforce = 0;
	if (!ppd->dd->cspec->r1)
		return;

	for (chan = 0; chan < SERDES_CHANS; chan++) {
		set_man_mode_h1(ppd, chan, 1, 0);
		set_man_code(ppd, chan, ppd->cpspec->h1_val);
		clock_man(ppd, chan);
		set_man_mode_h1(ppd, chan, 0, 0);
	}
}

#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)

#define R_OPCODE_LSB 3
#define R_OP_NOP 0
#define R_OP_SHIFT 2
#define R_OP_UPDATE 3
#define R_TDI_LSB 2
#define R_TDO_LSB 1
#define R_RDY 1

static int qib_r_grab(struct qib_devdata *dd)
{
	u64 val;

	val = SJA_EN;
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	return 0;
}

/* qib_r_wait_for_rdy() not only waits for the ready bit, it
 * returns the current state of R_TDO
 */
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
	u64 val;
	int timeout;

	for (timeout = 0; timeout < 100 ; ++timeout) {
		val = qib_read_kreg32(dd, kr_r_access);
		if (val & R_RDY)
			return (val >> R_TDO_LSB) & 1;
	}
	return -1;
}
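
/*
 * Shift 'len' bits through the selected BIST chain: TDI bits are taken
 * from 'inp' (if non-NULL) and TDO bits are collected into 'outp' (if
 * non-NULL). Returns the number of bits shifted, or a negative value
 * if the ready bit never comes up.
 */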
static int qib_r_shift(struct qib_devdata *dd, int bisten,
		       int len, u8 *inp, u8 *outp)
{
	u64 valbase, val;
	int ret, pos;

	valbase = SJA_EN | (bisten << BISTEN_LSB) |
		(R_OP_SHIFT << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret < 0)
		goto bail;
	for (pos = 0; pos < len; ++pos) {
		val = valbase;
		if (outp) {
			/* store the TDO bit returned by the previous wait */
			outp[pos >> 3] &= ~(1 << (pos & 7));
			outp[pos >> 3] |= (ret << (pos & 7));
		}
		if (inp) {
			int tdi = inp[pos >> 3] >> (pos & 7);

			val |= ((tdi & 1) << R_TDI_LSB);
		}
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
		ret = qib_r_wait_for_rdy(dd);
		if (ret < 0)
			break;
	}
	/* Restore to NOP between operations. */
	val = SJA_EN | (bisten << BISTEN_LSB);
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	ret = qib_r_wait_for_rdy(dd);

	if (ret >= 0)
		ret = pos;
bail:
	return ret;
}
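
/*
 * Bits are packed LSB-first within each byte of inp/outp: chain bit
 * "pos" lives at byte pos >> 3, bit position pos & 7.  For example
 * (illustrative only), chain bit 10 is bit 2 of buf[1]:
 *
 *	int bit10 = (buf[10 >> 3] >> (10 & 7)) & 1;
 */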
static int qib_r_update(struct qib_devdata *dd, int bisten)
{
	u64 val;
	int ret;

	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret >= 0) {
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	return ret;
}
#define BISTEN_PORT_SEL 15
#define LEN_PORT_SEL 625
#define BISTEN_AT 17
#define LEN_AT 156
#define BISTEN_ETM 16
#define LEN_ETM 632

#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
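
/*
 * BIT2BYTE() rounds a chain length in bits up to whole bytes, e.g.
 * BIT2BYTE(LEN_AT) = (156 + 7) / 8 = 20 and BIT2BYTE(LEN_PORT_SEL) =
 * (625 + 7) / 8 = 79, matching the array sizes below.
 */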
/* these are common for all IB port use cases. */
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
};

static u8 at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

/* used for IB1 or IB2, only one in use */
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
};

/* used when both IB1 and IB2 are in use */
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
};

/* used when only IB1 is in use */
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* used when only IB2 is in use */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};

/* used when both IB1 and IB2 are in use */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};
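
/*
 * The byte arrays above are raw images shifted into the AT, ETM, and
 * port-select scan chains; the values are hardware-specific constants
 * and are not otherwise documented here.
 */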
/*
 * Do setup to properly handle IB link recovery; if "both" is set, we
 * are initializing to cover both ports; otherwise we are initializing
 * to cover a single port card, or the port has reached INIT and we may
 * need to switch coverage types.
 */
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
{
	u8 *portsel, *etm;
	struct qib_devdata *dd = ppd->dd;

	if (!ppd->dd->cspec->r1)
		return;
	if (!both) {
		dd->cspec->recovery_ports_initted++;
		ppd->cpspec->recovery_init = 1;
	}
	if (!both && dd->cspec->recovery_ports_initted == 1) {
		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
		etm = atetm_1port;
	} else {
		portsel = portsel_2port;
		etm = atetm_2port;
	}

	if (qib_r_grab(dd) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
			portsel, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0)
		qib_dev_err(dd, "Failed IB link recovery setup\n");
}
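
/*
 * check_7322_rxe_status() applies only when a single port is covered for
 * recovery: it briefly asserts FreezeMode, checks whether the fault mask
 * still reads back, and either declares the HCA unusable (requiring a
 * power cycle) or clears the serdes PClk error and re-enables the IBC.
 */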
static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 fmask;

	if (dd->cspec->recovery_ports_initted != 1)
		return; /* rest doesn't apply to dualport */
	qib_write_kreg(dd, kr_control, dd->control |
		       SYM_MASK(Control, FreezeMode));
	(void)qib_read_kreg64(dd, kr_scratch);
	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask) {
		/*
		 * require a powercycle before we'll work again, and make
		 * sure we get no more interrupts, and don't turn off
		 * freeze.
		 */
		ppd->dd->cspec->stay_in_freeze = 1;
		qib_7322_set_intr_state(ppd->dd, 0);
		qib_write_kreg(dd, kr_fmask, 0ULL);
		qib_dev_err(dd, "HCA unusable until powercycled\n");
		return; /* eventually reset */
	}

	qib_write_kreg(ppd->dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

	/* don't do the full clear_freeze(), not needed for this */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);
	/* take IBC out of reset */
	if (ppd->link_speed_supported) {
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_read_kreg32(dd, kr_scratch);
		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	}
}