qib_iba7322.c

/*
 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
				  struct qib_ctxtdata *rcd);

static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
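/*
 * BMASK(msb, lsb) builds a contiguous mask covering bits lsb..msb
 * inclusive, e.g. BMASK(7, 4) == 0xf0.  It is computed in plain int,
 * so it is only usable for msb < 31; MASK_ACROSS() further below is
 * the 1ULL-based equivalent for wider fields.
 */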
#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))

/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation, \
		 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
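/*
 * All of the options above are read-only (S_IRUGO), so they can only
 * be set when the driver is loaded, e.g. (illustrative):
 *
 *	modprobe ib_qib num_vls=4 singleport=1
 */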
/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */

/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
	.string = txselect_list,
	.maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
		  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect, \
		 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QME7342)

#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
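/*
 * The generated header gives register locations as byte offsets
 * (QIB_7322_<regname>_OFFS); dividing by sizeof(u64) converts them to
 * indices into the u64-mapped register space, e.g. a register at byte
 * offset 0x180 becomes kreg index 0x30.  KREG_IBPORT_IDX() does the
 * same for the port-0 instance of a per-port register; the port-1
 * copy is reached through the per-port kpregbase pointer (see
 * struct qib_chippport_specific below).
 */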
#define MASK_ACROSS(lsb, msb) \
	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))

#define SYM_RMASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK << \
	QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	 SYM_RMASK(regname, fldname)))

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
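/*
 * Taken together: SYM_MASK() is a field's mask in register position,
 * SYM_RMASK() its right-justified mask, and SYM_FIELD() extracts the
 * field's value, e.g. the active link speed out of an IBCStatusA_0
 * read would be SYM_FIELD(ibcstat, IBCStatusA_0, LinkSpeedActive)
 * (the same field IBA7322_LINKSPEED_SHIFT below is derived from).
 */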
/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
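/*
 * A TID entry thus pairs a size code with a physical address whose
 * low IBA7322_TID_PA_SHIFT bits are dropped; roughly (illustrative,
 * see qib_7322_put_tid() for the real encoding):
 *
 *	tidval = (pa >> IBA7322_TID_PA_SHIFT) | IBA7322_TID_SZ_4K;
 */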
#define SendIBSLIDAssignMask \
	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
 */

/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl)	/* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS ( \
	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
	 SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
	 SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))

/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
	((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
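/*
 * So a data send on VL "vl" of port "port" sets up its PBC word
 * roughly as (illustrative, see qib_7322_setpbc_control()):
 *
 *	pbc = ((u64) (vl & PBC_VL_NUM_RMASK) << PBC_VL_NUM_LSB) |
 *	      ((u64) (port & PBC_PORT_SEL_RMASK) << PBC_PORT_SEL_LSB);
 *
 * while VL15 packets instead set PBC_7322_VL15_SEND (bit 63), which
 * bypasses the credit check.
 */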
static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};
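/*
 * The table above scales inter-packet delay inversely with rate,
 * normalized so the full 40 Gbps (4X QDR) rate is 1: a 2.5 Gbps
 * (1X SDR) destination gets 16x the delay, which is how static rate
 * control is approximated on this chip.
 */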
#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED        0x00
#define IB_7322_LT_STATE_LINKUP          0x01
#define IB_7322_LT_STATE_POLLACTIVE      0x02
#define IB_7322_LT_STATE_POLLQUIET       0x03
#define IB_7322_LT_STATE_SLEEPDELAY      0x04
#define IB_7322_LT_STATE_SLEEPQUIET      0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
#define IB_7322_LT_STATE_CFGIDLE         0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7322_LT_STATE_TXREVLANES      0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
#define IB_7322_LT_STATE_CFGENH          0x10
#define IB_7322_LT_STATE_CFGTEST         0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
#define IB_7322_LT_STATE_CFGWAITENH      0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN             0x0
#define IB_7322_L_STATE_INIT             0x1
#define IB_7322_L_STATE_ARM              0x2
#define IB_7322_L_STATE_ACTIVE           0x3
#define IB_7322_L_STATE_ACT_DEFER        0x4

static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] =
		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 main_int_mask;    /* clear bits which have dedicated handlers */
	u64 int_enable_mask;  /* for per port interrupts in single port mode */
	u64 errormask;
	u64 hwerrmask;
	u64 gpio_out;         /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask;        /* shadow the gpio mask register */
	u64 extctrl;          /* shadow the gpio output enable, etc... */
	u32 ncntrs;
	u32 nportcntrs;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 numctxts;
	u32 rcvegrcnt;
	u32 updthresh; /* current AvailUpdThld */
	u32 updthresh_dflt; /* default AvailUpdThld */
	u32 r1;
	int irq;
	u32 num_msix_entries;
	u32 sdmabufcnt;
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
	struct msix_entry *msix_entries;
	void **msix_arg;
	unsigned long *sendchkenable;
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128]; /* for device error interrupt msg buffer */
};
/* Table of Tx Emphasis entries, in "human readable" form. */
  496. struct txdds_ent {
  497. u8 amp;
  498. u8 pre;
  499. u8 main;
  500. u8 post;
  501. };
  502. struct vendor_txdds_ent {
  503. u8 oui[QSFP_VOUI_LEN];
  504. u8 *partnum;
  505. struct txdds_ent sdr;
  506. struct txdds_ent ddr;
  507. struct txdds_ent qdr;
  508. };
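/*
 * Per-vendor Tx settings: an entry is selected by matching the QSFP
 * module's OUI and part number, and supplies the sdr/ddr/qdr values to
 * use for that cable. (A sketch of intent; the matching logic lives in
 * the QSFP handling code later in this file.)
 */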
static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
	+ ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
struct qib_chippport_specific {
	u64 __iomem *kpregbase;
	u64 __iomem *cpregbase;
	u64 *portcntrs;
	struct qib_pportdata *ppd;
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;
	/*
	 * These 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors. They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
	unsigned long qdr_dfe_time;
	unsigned long chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;
	/*
	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	 * Entry zero is unused, to simplify indexing.
	 */
	u8 h1_val;
	u8 no_eep; /* txselect table index to use if no qsfp info */
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192]; /* for port error interrupt msg buffer */
};
static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port; /* 0 if not port-specific, else port # */
} irq_table[] = {
	{ QIB_DRV_NAME, qib_7322intr, -1, 0 },
	{ QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0 },
	{ QIB_DRV_NAME " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1 },
	{ QIB_DRV_NAME " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2 },
	{ QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
	{ QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
	{ QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
	{ QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
	{ QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
	{ QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
};
/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101
static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
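/*
 * Per-context "user" registers are virtualized: context ctxt's copy of
 * register regno lives at (ureg_align * ctxt) + regno, based either at
 * dd->userbase (when the user segment is mapped separately) or at
 * dd->kregbase + dd->uregbase. The accessors below all compute the
 * address the same way.
 */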
/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from a valid zero at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}
/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from a valid zero at
 * runtime; we may add a separate error variable at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
				enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}
/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *)dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *)dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}
static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *)&dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}

/*
 * Not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
				       const u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}
static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readl(&ppd->cpspec->cpregbase[regno]);
}
/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)

#define QIB_I_P_SDMAINT(pidx) \
	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
	 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
	 QIB_I_SPIOSENT | \
	 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
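/*
 * "BITSEXTANT" here means the set of bits that actually exist on this
 * chip: the per-port sets for both ports plus the device-common set.
 * The error-bit EXTANT masks below follow the same convention.
 */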
/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per-chip (rather than per-port) errors. Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or indicate errors with no recovery; either way,
 * we want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/*
 * SDMA chip errors (not per port).
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism. This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
/*
 * The set below is functionally equivalent to the legacy
 * QLOGIC_IB_E_PKTERRS; it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed.
 */
#define QIB_E_P_SPKTERRS (\
	QIB_E_P_SUNEXP_PKTNUM |\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMAXPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
	QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
	ERR_MASK_N(SendUnsupportedVLErr) | \
	QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAUNEXPDATA | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
	)
/*
 * These are errors that can occur when the link changes state while a
 * packet is being sent or received. This doesn't cover things like EBP
 * or VCRC that can be the result of the link changing state on the
 * sending side, in which case we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
	QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise neuter E_SPKT_ERRS_IGNORE: no send packet errors are ignored */
#define E_SPKT_ERRS_IGNORE 0
#define QIB_EXTS_MEMBIST_DISABLED \
	SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
	SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
	ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled). It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
	.msg = #fldname , .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
	HWE_AUTO(PCIESerdesPClkNotDetect),
	HWE_AUTO(PowerOnBISTFailed),
	HWE_AUTO(TempsenseTholdReached),
	HWE_AUTO(MemoryErr),
	HWE_AUTO(PCIeBusParityErr),
	HWE_AUTO(PcieCplTimeout),
	HWE_AUTO(PciePoisonedTLP),
	HWE_AUTO_P(SDmaMemReadErr, 1),
	HWE_AUTO_P(SDmaMemReadErr, 0),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
	HWE_AUTO(statusValidNoEop),
	HWE_AUTO(LATriggered),
	{ .mask = 0, .sz = 0 }
};
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }

static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
	E_AUTO(RcvEgrFullErr),
	E_AUTO(RcvHdrFullErr),
	E_AUTO(ResetNegated),
	E_AUTO(HardwareErr),
	E_AUTO(InvalidAddrErr),
	E_AUTO(SDmaVL15Err),
	E_AUTO(SBufVL15MisUseErr),
	E_AUTO(InvalidEEPCmd),
	E_AUTO(RcvContextShareErr),
	E_AUTO(SendVLMismatchErr),
	E_AUTO(SendArmLaunchErr),
	E_AUTO(SendSpecialTriggerErr),
	E_AUTO(SDmaWrongPortErr),
	E_AUTO(SDmaBufMaskDuplicateErr),
	{ .mask = 0, .sz = 0 }
};
static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
	E_P_AUTO(IBStatusChanged),
	E_P_AUTO(SHeadersErr),
	E_P_AUTO(VL15BufMisuseErr),
	/*
	 * SDmaHaltErr is not really an error, so report it as
	 * "SDmaHalted" to make that clearer.
	 */
	{ .mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
	  .sz = 11 },
	E_P_AUTO(SDmaDescAddrMisalignErr),
	E_P_AUTO(SDmaUnexpDataErr),
	E_P_AUTO(SDmaMissingDwErr),
	E_P_AUTO(SDmaDwEnErr),
	E_P_AUTO(SDmaRpyTagErr),
	E_P_AUTO(SDma1stDescErr),
	E_P_AUTO(SDmaBaseErr),
	E_P_AUTO(SDmaTailOutOfBoundErr),
	E_P_AUTO(SDmaOutOfBoundErr),
	E_P_AUTO(SDmaGenMismatchErr),
	E_P_AUTO(SendBufMisuseErr),
	E_P_AUTO(SendUnsupportedVLErr),
	E_P_AUTO(SendUnexpectedPktNumErr),
	E_P_AUTO(SendDroppedDataPktErr),
	E_P_AUTO(SendDroppedSmpPktErr),
	E_P_AUTO(SendPktLenErr),
	E_P_AUTO(SendUnderRunErr),
	E_P_AUTO(SendMaxPktLenErr),
	E_P_AUTO(SendMinPktLenErr),
	E_P_AUTO(RcvIBLostLinkErr),
	E_P_AUTO(RcvHdrErr),
	E_P_AUTO(RcvHdrLenErr),
	E_P_AUTO(RcvBadTidErr),
	E_P_AUTO(RcvBadVersionErr),
	E_P_AUTO(RcvIBFlowErr),
	E_P_AUTO(RcvEBPErr),
	E_P_AUTO(RcvUnsupportedVLErr),
	E_P_AUTO(RcvUnexpectedCharErr),
	E_P_AUTO(RcvShortPktLenErr),
	E_P_AUTO(RcvLongPktLenErr),
	E_P_AUTO(RcvMaxPktLenErr),
	E_P_AUTO(RcvMinPktLenErr),
	E_P_AUTO(RcvICRCErr),
	E_P_AUTO(RcvVCRCErr),
	E_P_AUTO(RcvFormatErr),
	{ .mask = 0, .sz = 0 }
};
/*
 * Below generates "auto-message" for interrupts not specific to any port or
 * context.
 */
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_0), \
	SYM_LSB(IntMask, fldname##Mask##_1)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_1), \
	SYM_LSB(IntMask, fldname##Mask##_0)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
 * Below generates "auto-message" for interrupts specific to a context,
 * with ctxt-number appended.
 */
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##0IntMask), \
	SYM_LSB(IntMask, fldname##17IntMask)), \
	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }

static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
	INTR_AUTO_P(SDmaInt),
	INTR_AUTO_P(SDmaProgressInt),
	INTR_AUTO_P(SDmaIdleInt),
	INTR_AUTO_P(SDmaCleanupDone),
	INTR_AUTO_C(RcvUrg),
	INTR_AUTO_P(ErrInt),
	INTR_AUTO(ErrInt), /* non-port-specific errs */
	INTR_AUTO(AssertGPIOInt),
	INTR_AUTO_P(SendDoneInt),
	INTR_AUTO(SendBufAvailInt),
	INTR_AUTO_C(RcvAvail),
	{ .mask = 0, .sz = 0 }
};
#define TXSYMPTOM_AUTO_P(fldname) \
	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
	  .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
	TXSYMPTOM_AUTO_P(NonKeyPacket),
	TXSYMPTOM_AUTO_P(GRHFail),
	TXSYMPTOM_AUTO_P(PkeyFail),
	TXSYMPTOM_AUTO_P(QPFail),
	TXSYMPTOM_AUTO_P(SLIDFail),
	TXSYMPTOM_AUTO_P(RawIPV6),
	TXSYMPTOM_AUTO_P(PacketTooSmall),
	{ .mask = 0, .sz = 0 }
};

#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer so it can be re-used;
 * we don't need to force the update of pioavail in that case.
 */
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 i;
	int any;
	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long sbuf[4];

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	any = 0;
	for (i = 0; i < regcnt; ++i) {
		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
		if (sbuf[i]) {
			any = 1;
			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
		}
	}

	if (any)
		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
}
/* No txe_recover yet, if ever */

/* No decode__errors yet */
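/*
 * err_decode - build a human-readable message from error bits.
 * @msg/@len: output buffer and its size
 * @errs: error bits to decode
 * @msp: table of mask/name entries, terminated by a zero mask
 *
 * Appends a comma-separated name for each set bit found in the table;
 * for multi-bit masks a _<bit index within the mask> suffix is added,
 * and any bits not covered by the table are shown in hex as "MORE:".
 */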
static void err_decode(char *msg, size_t len, u64 errs,
		       const struct qib_hwerror_msgs *msp)
{
	u64 these, lmask;
	int took, multi, n = 0;

	while (errs && msp && msp->mask) {
		multi = (msp->mask & (msp->mask - 1));
		while (errs & msp->mask) {
			these = (errs & msp->mask);
			lmask = (these & (these - 1)) ^ these;
			if (len) {
				if (n++) {
					/* separate the strings */
					*msg++ = ',';
					len--;
				}
				BUG_ON(!msp->sz);
				/* msp->sz counts the nul */
				took = min_t(size_t, msp->sz - (size_t)1, len);
				memcpy(msg, msp->msg, took);
				len -= took;
				msg += took;
				if (len)
					*msg = '\0';
			}
			errs &= ~lmask;
			if (len && multi) {
				/* More than one bit this mask */
				int idx = -1;

				while (lmask & msp->mask) {
					++idx;
					lmask >>= 1;
				}
				took = scnprintf(msg, len, "_%d", idx);
				len -= took;
				msg += took;
			}
		}
		++msp;
	}
	/* If some bits are left, show in hex. */
	if (len && errs)
		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
			 (unsigned long long) errs);
}
/* only called if r1 set */
static void flush_fifo(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *piobuf;
	u32 bufn;
	u32 *hdr;
	u64 pbc;
	const unsigned hdrwords = 7;
	static struct qib_ib_header ibhdr = {
		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
		.lrh[1] = IB_LID_PERMISSIVE,
		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
		.lrh[3] = IB_LID_PERMISSIVE,
		.u.oth.bth[0] = cpu_to_be32(
			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
		.u.oth.bth[1] = cpu_to_be32(0),
		.u.oth.bth[2] = cpu_to_be32(0),
		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
	};

	/*
	 * Send a dummy VL15 packet to flush the launch FIFO.
	 * This will not actually be sent since the TxeBypassIbc bit is set.
	 */
	pbc = PBC_7322_VL15_SEND |
	      (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
	      (hdrwords + SIZE_OF_CRC);
	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
	if (!piobuf)
		return;
	writeq(pbc, piobuf);
	hdr = (u32 *) &ibhdr;
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
		qib_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
		qib_flush_wc();
	} else
		qib_pio_copy(piobuf + 2, hdr, hdrwords);
	qib_sendbuf_done(dd, bufn);
}
/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 set_sendctrl = 0;
	u64 clr_sendctrl = 0;

	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);

	spin_lock(&dd->sendctrl_lock);

	/* If we are draining everything, block sends first */
	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	ppd->p_sendctrl |= set_sendctrl;
	ppd->p_sendctrl &= ~clr_sendctrl;

	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
		qib_write_kreg_port(ppd, krp_sendctrl,
				    ppd->p_sendctrl |
				    SYM_MASK(SendCtrl_0, SDmaCleanup));
	else
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock(&dd->sendctrl_lock);

	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
		flush_fifo(ppd);
}
static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}

static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
	/*
	 * Set SendDmaLenGen, and then clear and set the MSB of the
	 * generation count, to enable generation checking and load the
	 * internal generation counter.
	 */
	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg_port(ppd, krp_senddmalengen,
			    ppd->sdma_descq_cnt |
			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg_port(ppd, krp_senddmatail, tail);
}
/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	/*
	 * Drain all FIFOs.
	 * The hardware doesn't require this but we do it so that verbs
	 * and user applications don't wait for link active to send stale
	 * data.
	 */
	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);

	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	ppd->sdma_head_dma[0] = 0;
	qib_7322_sdma_sendctrl(ppd,
		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
}
#define DISABLES_SDMA ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)
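/*
 * Note: DISABLES_SDMA is QIB_E_P_SDMAERRS minus SDmaUnexpData, i.e.
 * (presumably) the subset of per-port SDMA errors that are treated as
 * disabling the engine, rather than being per-packet.
 */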
static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
{
	unsigned long flags;
	struct qib_devdata *dd = ppd->dd;

	errs &= QIB_E_P_SDMAERRS;

	if (errs & QIB_E_P_SDMAUNEXPDATA)
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
			    ppd->port);

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e20_hw_started);
		break;

	case qib_sdma_state_s20_idle:
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e50_hw_cleaned);
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e60_hw_halted);
		break;

	case qib_sdma_state_s99_running:
		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
		break;
	}

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
/*
 * Handle per-device errors (not per-port errors).
 */
static noinline void handle_7322_errors(struct qib_devdata *dd)
{
	char *msg;
	u64 iserr = 0;
	u64 errs;
	u64 mask;
	int log_idx;

	qib_stats.sps_errints++;
	errs = qib_read_kreg64(dd, kr_errstatus);
	if (!errs) {
		qib_devinfo(dd->pcidev, "device error interrupt, "
			    "but no error bits set!\n");
		goto done;
	}

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* do these first, they are most important */
	if (errs & QIB_E_HARDWARE) {
		*msg = '\0';
		qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
	} else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & QIB_E_SPKTERRS) {
		qib_disarm_7322_senderrbufs(dd->pport);
		qib_stats.sps_txerrs++;
	} else if (errs & QIB_E_INVALIDADDR)
		qib_stats.sps_txerrs++;
	else if (errs & QIB_E_ARMLAUNCH) {
		qib_stats.sps_txerrs++;
		qib_disarm_7322_senderrbufs(dd->pport);
	}
	qib_write_kreg(dd, kr_errclear, errs);

	/*
	 * The ones we mask off are handled specially below
	 * or above.
	 */
	mask = QIB_E_HARDWARE;
	*msg = '\0';

	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
		   qib_7322error_msgs);

	/*
	 * Getting reset is a tragedy for all ports. Mark the device
	 * _and_ the ports as "offline" in a way meaningful to each.
	 */
	if (errs & QIB_E_RESET) {
		int pidx;

		qib_dev_err(dd, "Got reset, requires re-init "
			    "(unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED;  /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_err(dd, "%s error\n", msg);

	/*
	 * If there were hdrq or egrfull errors, wake up any processes
	 * waiting in poll. We used to try to check which contexts had
	 * the overflow, but given the cost of that and the chip reads
	 * to support it, it's better to just wake everybody up if we
	 * get an overflow; waiters can poll again if it's not them.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}

done:
	return;
}
static void qib_error_tasklet(unsigned long data)
{
	struct qib_devdata *dd = (struct qib_devdata *)data;

	handle_7322_errors(dd);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
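/*
 * "Chase" handling: if link training ping-pongs between states (see
 * handle_serdes_issues() below), the link is disabled for a while via
 * disable_chase(), and the chase_timer callback below re-enables it by
 * requesting LINKCMD_DOWN with LINKINITCMD_POLL.
 */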
static void reenable_chase(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	ppd->cpspec->chase_timer.expires = 0;
	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
			  u8 ibclt)
{
	ppd->cpspec->chase_end = 0;

	if (!qib_chase)
		return;

	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
	add_timer(&ppd->cpspec->chase_timer);
}
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
	u8 ibclt;
	unsigned long tnow;

	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);

	/*
	 * Detect and handle the state chase issue, where we can
	 * get stuck if we are unlucky on timing on both sides of
	 * the link. If we are, we disable, set a timer, and
	 * then re-enable.
	 */
	switch (ibclt) {
	case IB_7322_LT_STATE_CFGRCVFCFG:
	case IB_7322_LT_STATE_CFGWAITRMT:
	case IB_7322_LT_STATE_TXREVLANES:
	case IB_7322_LT_STATE_CFGENH:
		tnow = jiffies;
		if (ppd->cpspec->chase_end &&
		    time_after(tnow, ppd->cpspec->chase_end))
			disable_chase(ppd, tnow, ibclt);
		else if (!ppd->cpspec->chase_end)
			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
		break;
	default:
		ppd->cpspec->chase_end = 0;
		break;
	}

	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
	     ibclt == IB_7322_LT_STATE_LINKUP) &&
	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
		force_h1(ppd);
		ppd->cpspec->qdr_reforce = 1;
		if (!ppd->dd->cspec->r1)
			serdes_7322_los_enable(ppd, 0);
	} else if (ppd->cpspec->qdr_reforce &&
		   (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
		   (ibclt == IB_7322_LT_STATE_CFGENH ||
		    ibclt == IB_7322_LT_STATE_CFGIDLE ||
		    ibclt == IB_7322_LT_STATE_LINKUP))
		force_h1(ppd);

	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
	    ppd->link_speed_enabled == QIB_IB_QDR &&
	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
	     ibclt == IB_7322_LT_STATE_CFGENH ||
	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
		adj_tx_serdes(ppd);

	if (ibclt != IB_7322_LT_STATE_LINKUP) {
		u8 ltstate = qib_7322_phys_portstate(ibcst);
		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
					  LinkTrainingState);
		if (!ppd->dd->cspec->r1 &&
		    pibclt == IB_7322_LT_STATE_LINKUP &&
		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
			/* If the link went down (but not into recovery),
			 * turn LOS back on */
			serdes_7322_los_enable(ppd, 1);
		if (!ppd->cpspec->qdr_dfe_on &&
		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
			ppd->cpspec->qdr_dfe_on = 1;
			ppd->cpspec->qdr_dfe_time = 0;
			/* On link down, reenable QDR adaptation */
			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_DOWN_R1 :
					    QDR_STATIC_ADAPT_DOWN);
			printk(KERN_INFO QIB_DRV_NAME
			       " IB%u:%u re-enabled QDR adaptation "
			       "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
		}
	}
}
static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);

/*
 * This is per-pport error handling; it will likely get its own
 * MSIx interrupt (one for each port, although just a single handler).
 */
static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
{
	char *msg;
	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
	struct qib_devdata *dd = ppd->dd;

	/* do this as soon as possible */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask)
		check_7322_rxe_status(ppd);

	errs = qib_read_kreg_port(ppd, krp_errstatus);
	if (!errs)
		qib_devinfo(dd->pcidev,
			    "Port%d error interrupt, but no error bits set!\n",
			    ppd->port);
	if (!fmask)
		errs &= ~QIB_E_P_IBSTATUSCHANGED;
	if (!errs)
		goto done;

	msg = ppd->cpspec->epmsgbuf;
	*msg = '\0';

	if (errs & ~QIB_E_P_BITSEXTANT) {
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
		if (!*msg)
			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
				 "no others");
		qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
				" errors 0x%016Lx set (and %s)\n",
				(errs & ~QIB_E_P_BITSEXTANT), msg);
		*msg = '\0';
	}

	if (errs & QIB_E_P_SHDR) {
		u64 symptom;

		/* determine cause, then write to clear */
		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
			   hdrchk_msgs);
		*msg = '\0';
		/* senderrbuf cleared in SPKTERRS below */
	}

	if (errs & QIB_E_P_SPKTERRS) {
		if ((errs & QIB_E_P_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time. The IB logic then complains that the packet
			 * isn't valid. We don't want to confuse people, so
			 * we just don't print them, except at debug
			 */
			err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
				   (errs & QIB_E_P_LINK_PKTERRS),
				   qib_7322p_error_msgs);
			*msg = '\0';
			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
		}
		qib_disarm_7322_senderrbufs(ppd);
	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid. We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
			   qib_7322p_error_msgs);
		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
		*msg = '\0';
	}

	qib_write_kreg_port(ppd, krp_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	if (errs & QIB_E_P_RPKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & QIB_E_P_SPKTERRS)
		qib_stats.sps_txerrs++;

	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);

	if (errs & QIB_E_P_SDMAERRS)
		sdma_7322_p_errors(ppd, errs);

	if (errs & QIB_E_P_IBSTATUSCHANGED) {
		u64 ibcs;
		u8 ltstate;

		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		ltstate = qib_7322_phys_portstate(ibcs);

		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			handle_serdes_issues(ppd, ibcs);
		if (!(ppd->cpspec->ibcctrl_a &
		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
			/*
			 * We got our interrupt, so init code should be
			 * happy and not try alternatives. Now squelch
			 * other "chatter" from link-negotiation (pre Init)
			 */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
		}

		/* Update our picture of width and speed from chip */
		ppd->link_width_active =
			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
			IB_WIDTH_4X : IB_WIDTH_1X;
		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
			SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
			QIB_IB_DDR : QIB_IB_SDR;

		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
		    IB_PHYSPORTSTATE_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		else
			/*
			 * Since going into a recovery state causes the link
			 * state to go down and since recovery is transitory,
			 * it is better if we "miss" ever seeing the link
			 * training state go into recovery (i.e., ignore this
			 * transition for link state special handling purposes)
			 * without updating lastibcstat.
			 */
			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
				qib_handle_e_ibstatuschanged(ppd, ibcs);
	}

	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);

done:
	return;
}
/* enable/disable chip from delivering interrupts */
static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
		/* cause any pending enabled interrupts to be re-delivered */
		qib_write_kreg(dd, kr_intclear, 0ULL);
		if (dd->cspec->num_msix_entries) {
			/* and same for MSIx */
			u64 val = qib_read_kreg64(dd, kr_intgranted);

			if (val)
				qib_write_kreg(dd, kr_intgranted, val);
		}
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}
/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips.
 */
static void qib_7322_clear_freeze(struct qib_devdata *dd)
{
	int pidx;

	/* disable error interrupts, to avoid confusion */
	qib_write_kreg(dd, kr_errmask, 0ULL);

	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
	qib_7322_set_intr_state(dd, 0);

	/* clear the freeze, and be sure chip saw it */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);

	/*
	 * Force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends. Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	/* We need to purge per-port errs and reset mask, too */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (!dd->pport[pidx].link_speed_supported)
			continue;
		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
	}
	qib_7322_set_intr_state(dd, 1);
}
/* no error handling to speak of */
/**
 * qib_7322_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Use the same msg buffer as regular errors (qib_handle_errors()) to
 * avoid excessive stack use. Most hardware errors are catastrophic,
 * but for right now, we'll print them and continue.
 */
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 ctrl;
	int isfatal = 0;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		qib_dev_err(dd, "Read of hardware error status failed "
			    "(all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/* Always clear the error status register, except BIST fail */
	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
		       ~HWE_MASK(PowerOnBISTFailed));

	hwerrs &= dd->cspec->hwerrmask;

	/* no EEPROM logging, yet */

	if (hwerrs)
		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
			    "(cleared)\n", (unsigned long long) hwerrs);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
		/*
		 * No recovery yet...
		 */
		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
		    dd->cspec->stay_in_freeze) {
			/*
			 * If any bits we aren't ignoring are set, only
			 * complain once, in case it's stuck or recurring
			 * and we get here multiple times.
			 * Force link down, so switch knows, and
			 * LEDs are turned off.
			 */
			if (dd->flags & QIB_INITTED)
				isfatal = 1;
		} else
			qib_7322_clear_freeze(dd);
	}

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcpy(msg, "[Memory BIST test failed, "
			"InfiniPath hardware unusable]", msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);

	/* Ignore esoteric PLL failures et al. */

	qib_dev_err(dd, "%s hardware error\n", msg);

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd, "Fatal Hardware Error, no longer"
			    " usable, SN %.16s\n", dd->serial);
		/*
		 * For /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}

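/*
 * Illustration of the pattern err_decode() above relies on: walk a
 * table of {mask, string} pairs and append the string for every bit
 * that is set. A minimal freestanding sketch with hypothetical names
 * (errmsg_entry, demo_decode); kept under #if 0 so it is never
 * compiled. Assumes len >= 1.
 */
#if 0
struct errmsg_entry {
	unsigned long long mask;	/* bit(s) this message describes */
	const char *msg;		/* human-readable description */
};

static void demo_decode(char *buf, unsigned long len,
			unsigned long long errs,
			const struct errmsg_entry *tbl, unsigned long n)
{
	unsigned long i, used = 0;

	buf[0] = '\0';
	for (i = 0; i < n && used + 2 < len; i++)
		if (errs & tbl[i].mask) {
			const char *s = tbl[i].msg;

			if (used)
				buf[used++] = ',';	/* separate entries */
			while (*s && used + 1 < len)
				buf[used++] = *s++;
			buf[used] = '\0';
		}
}
#endif
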
/**
 * qib_7322_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask
 */
static void qib_7322_init_hwerrors(struct qib_devdata *dd)
{
	int pidx;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);
	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
			 QIB_EXTS_MEMBIST_ENDTEST)))
		qib_dev_err(dd, "MemBIST did not complete!\n");

	/* never clear BIST failure, so reported on each driver load */
	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    ~0ULL);
}

/*
 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based. There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
	} else
		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

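/*
 * The errormask handling above follows a common shadow-register
 * pattern: keep the authoritative value in memory (dd->cspec->errormask),
 * update it there, and write the whole word to the chip, so toggling a
 * bit never requires reading hardware back. A minimal sketch of the
 * idea with hypothetical names (shadow_dev, demo_set_bit); not compiled:
 */
#if 0
struct shadow_dev {
	unsigned long long errormask_shadow;	/* software copy */
};

static void demo_set_bit(struct shadow_dev *sd, unsigned long long bit,
			 int enable, void (*write_reg)(unsigned long long))
{
	if (enable)
		sd->errormask_shadow |= bit;
	else
		sd->errormask_shadow &= ~bit;
	write_reg(sd->errormask_shadow);	/* full-word write */
}
#endif
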
/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC)
		 */
		qib_7322_mini_pcs_reset(ppd);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
	}

	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
			    mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);
}

/*
 * The total RCV buffer memory is 64KB, used for both ports, and is
 * in units of 64 bytes (same as IB flow control credit unit).
 * The consumedVL unit in the same registers are in 32 byte units!
 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
 * in krp_rxcreditvl15, rather than 10.
 */
#define RCV_BUF_UNITSZ 64
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))

static void set_vls(struct qib_pportdata *ppd)
{
	int i, numvls, totcred, cred_vl, vl0extra;
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	numvls = qib_num_vls(ppd->vls_operational);

	/*
	 * Set up per-VL credits. Below is kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 enough credits (9 buffer chunks) for two
	 *    max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
	 */
	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
	totcred = NUM_RCV_BUF_UNITS(dd);
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	totcred -= cred_vl;
	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
	cred_vl = totcred / numvls;
	vl0extra = totcred - cred_vl * numvls;
	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
	for (i = 1; i < numvls; i++)
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
	for (; i < 8; i++) /* no buffer space for other VLs */
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);

	/* Notify IBC that credits need to be recalculated */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	for (i = 0; i < numvls; i++)
		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);

	/* Change the number of operational VLs */
	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
				  ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
}

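/*
 * The per-VL arithmetic above, worked out for the dual-port case as a
 * sketch (hypothetical demo_vl15_credits(); not compiled): 64KB total
 * buffer / (64-byte units * 2 ports) = 512 units per port; two 288-byte
 * VL15 packets round up to 9 units, leaving 503 to split across VL0-N.
 */
#if 0
static int demo_vl15_credits(int num_pports, int numvls, int *vl0cred)
{
	int totcred = (64 * 1024) / (64 * num_pports);	/* 512 for 2 ports */
	int vl15 = (2 * 288 + 63) / 64;			/* = 9 units */
	int per_vl = (totcred - vl15) / numvls;

	/* VL0 also gets the division remainder */
	*vl0cred = per_vl + (totcred - vl15) % numvls;
	return vl15;
}
#endif
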
/*
 * The code that deals with actual SerDes is in serdes_7322_init().
 * Compared to the code for iba7220, it is minimal.
 */
static int serdes_7322_init(struct qib_pportdata *ppd);

/**
 * qib_7322_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 */
static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, guid, ibc;
	unsigned long flags;
	int ret = 0;

	/*
	 * SerDes model not in Pd, but still need to
	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
	 * eventually.
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	if (qib_compat_ddr_negotiate) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
							       crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
							       crp_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur. Units are 128ns for this chip.
	 * Set to 3usec.
	 */
	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
		SYM_LSB(IBCCtrlA_0, MaxPktLen);
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */

	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is still
	 * in reset from above). Writes new value of ibcctrl_a as last step.
	 */
	qib_7322_mini_pcs_reset(ppd);

	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
							    krp_ibcctrl_b);
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
					    IBA7322_IBC_SPEED_DDR |
					    IBA7322_IBC_SPEED_SDR |
					    IBA7322_IBC_WIDTH_AUTONEG |
					    SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
				(lse << IBA7322_IBC_SPEED_LSB) |
				IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
		else
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
				IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_IBTA_1_2_MASK :
				(lse == QIB_IB_DDR) ?
					IBA7322_IBC_SPEED_DDR :
					IBA7322_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7322_IBC_WIDTH_4X_ONLY :
				IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
					   IBA7322_IBC_HRTBT_MASK);
	}
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);

	serdes_7322_init(ppd);

	guid = be64_to_cpu(ppd->guid);
	if (!guid) {
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	}

	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

	/* Enable port */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	set_vls(ppd);

	/* initially come up DISABLED, without sending anything. */
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear the linkinit cmds */
	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);

	/* be paranoid against later code motion, etc. */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Also enable IBSTATUSCHG interrupt. */
	val = qib_read_kreg_port(ppd, krp_errmask);
	qib_write_kreg_port(ppd, krp_errmask,
			    val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
	return ret;
}

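/*
 * The (lse & (lse - 1)) test above is the standard "more than one bit
 * set" check: clearing the lowest set bit leaves zero only for powers
 * of two. Sketch (hypothetical name; not compiled):
 */
#if 0
static int demo_multiple_bits_set(unsigned v)
{
	return v && (v & (v - 1));	/* e.g. SDR|DDR -> 1, QDR alone -> 0 */
}
#endif
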
/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	unsigned long flags;

	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);

	/*
	 * Despite the name, actually disables IBC as well. Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
	 * along with the PCS being reset.
	 */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_7322_mini_pcs_reset(ppd);

	/*
	 * Update the adjusted counters so the adjustment persists
	 * across driver reload.
	 */
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
		}
		if (ppd->cpspec->iblnkdowndelta) {
			val = read_7322_creg32_port(ppd, crp_iblinkdown);
			val += ppd->cpspec->iblnkdowndelta;
			write_7322_creg_port(ppd, crp_iblinkdown, val);
		}
		/*
		 * No need to save ibmalfdelta since IB perfcounters
		 * are cleared on driver reload.
		 */

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
}

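/*
 * The snapshot/delta adjustment above hides error counts that
 * accumulate while speed negotiation is in progress: a snapshot is
 * taken before autoneg, and the accumulated difference is subtracted
 * back out so the adjustment survives a driver reload. A sketch with
 * hypothetical names (demo_cntr, demo_adjusted); not compiled:
 */
#if 0
struct demo_cntr {
	unsigned int snap;	/* value when negotiation started */
	unsigned int delta;	/* error count attributed to autoneg */
	int inprog;		/* snapshot taken, delta not final yet */
};

static unsigned int demo_adjusted(struct demo_cntr *c, unsigned int hw)
{
	if (c->inprog)
		hw = c->snap;	/* discard everything since the snapshot */
	return hw - c->delta;	/* remove previously recorded autoneg noise */
}
#endif
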
/**
 * qib_setup_7322_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs when @on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
 *
 * Note: We try to match the Mellanox HCA LED behavior as best
 * we can. Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate. That's
 * visible overhead, so not something we will do.
 */
static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
{
	struct qib_devdata *dd = ppd->dd;
	u64 extctl, ledblink = 0, val;
	unsigned long flags;
	int yel, grn;

	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
	if (dd->diag_client)
		return;

	/* Allow override of LED display for, e.g. Locating system in rack */
	if (ppd->led_override) {
		grn = (ppd->led_override & QIB_LED_PHYS);
		yel = (ppd->led_override & QIB_LED_LOG);
	} else if (on) {
		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		grn = qib_7322_phys_portstate(val) ==
			IB_PHYSPORTSTATE_LINKUP;
		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
	} else {
		grn = 0;
		yel = 0;
	}

	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
	if (grn) {
		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
		/*
		 * Counts are in chip clock (4ns) periods.
		 * This is 66.6 ms on, 187.5 ms off, with packets rcvd.
		 */
		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
	}
	if (yel)
		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);

	if (ledblink) /* blink the LED on packet receive */
		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
}

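/*
 * The blink-count arithmetic above: the counters tick once per 4 ns
 * chip clock, so a period in microseconds converts as usec * 1000 / 4.
 * Sketch (hypothetical name; not compiled):
 */
#if 0
static unsigned long demo_led_ticks(unsigned long usec)
{
	/* 66600 usec -> 16,650,000 ticks of the 250 MHz (4 ns) clock */
	return usec * 1000UL / 4;
}
#endif
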
/*
 * Disable MSIx interrupt if enabled, call generic MSIx code
 * to cleanup, and clear pending MSIx interrupts.
 * Used for fallback to INTx, after reset, and when MSIx setup fails.
 */
static void qib_7322_nomsix(struct qib_devdata *dd)
{
	u64 intgranted;
	int n;

	dd->cspec->main_int_mask = ~0ULL;
	n = dd->cspec->num_msix_entries;
	if (n) {
		int i;

		dd->cspec->num_msix_entries = 0;
		for (i = 0; i < n; i++)
			free_irq(dd->cspec->msix_entries[i].vector,
				 dd->cspec->msix_arg[i]);
		qib_nomsix(dd);
	}
	/* make sure no MSIx interrupts are left pending */
	intgranted = qib_read_kreg64(dd, kr_intgranted);
	if (intgranted)
		qib_write_kreg(dd, kr_intgranted, intgranted);
}

static void qib_7322_free_irq(struct qib_devdata *dd)
{
	if (dd->cspec->irq) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	qib_7322_nomsix(dd);
}

static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
	int i;

	qib_7322_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->sendchkenable);
	kfree(dd->cspec->sendgrhchk);
	kfree(dd->cspec->sendibchk);
	kfree(dd->cspec->msix_entries);
	kfree(dd->cspec->msix_arg);
	for (i = 0; i < dd->num_pports; i++) {
		unsigned long flags;
		u32 mask = QSFP_GPIO_MOD_PRS_N |
			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);

		kfree(dd->pport[i].cpspec->portcntrs);
		if (dd->flags & QIB_HAS_QSFP) {
			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
			dd->cspec->gpio_mask &= ~mask;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
		}
		if (dd->pport[i].ibport_data.smi_ah)
			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
	}
}

/* handle SDMA interrupts */
static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
{
	struct qib_pportdata *ppd0 = &dd->pport[0];
	struct qib_pportdata *ppd1 = &dd->pport[1];
	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));

	if (intr0)
		qib_sdma_intr(ppd0);
	if (intr1)
		qib_sdma_intr(ppd1);
	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
}

/*
 * Set or clear the Send buffer available interrupt enable bit.
 */
static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (needint)
		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
	else
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/*
 * Somehow got an interrupt with reserved bits set in interrupt status.
 * Print a message so we know it happened, then clear them.
 * keep mainline interrupt handler cache-friendly
 */
static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
{
	u64 kills;

	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
		    (unsigned long long) kills);
	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
}

/* keep mainline interrupt handler cache-friendly */
static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
{
	u32 gpiostatus;
	int handled = 0;
	int pidx;

	/*
	 * Boards for this chip currently don't use GPIO interrupts,
	 * so clear by writing GPIOstatus to GPIOclear, and complain
	 * to developer. To avoid endless repeats, clear
	 * the bits in the mask, since there is some kind of
	 * programming error or chip problem.
	 */
	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
	/*
	 * In theory, writing GPIOstatus to GPIOclear could
	 * have a bad side-effect on some diagnostic that wanted
	 * to poll for a status-change, but the various shadows
	 * make that problematic at best. Diags will just suppress
	 * all GPIO interrupts during such tests.
	 */
	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
	/*
	 * Check for QSFP MOD_PRS changes
	 * only works for single port if IB1 != pidx1
	 */
	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
	     ++pidx) {
		struct qib_pportdata *ppd;
		struct qib_qsfp_data *qd;
		u32 mask;

		if (!dd->pport[pidx].link_speed_supported)
			continue;
		mask = QSFP_GPIO_MOD_PRS_N;
		ppd = dd->pport + pidx;
		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
		if (gpiostatus & dd->cspec->gpio_mask & mask) {
			u64 pins;

			qd = &ppd->cpspec->qsfp_data;
			gpiostatus &= ~mask;
			pins = qib_read_kreg64(dd, kr_extstatus);
			pins >>= SYM_LSB(EXTStatus, GPIOIn);
			if (!(pins & mask)) {
				++handled;
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
		}
	}

	if (gpiostatus && !handled) {
		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
		u32 gpio_irq = mask & gpiostatus;

		/*
		 * Clear any troublemakers, and update chip from shadow
		 */
		dd->cspec->gpio_mask &= ~gpio_irq;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	}
}

/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
{
	if (istat & ~QIB_I_BITSEXTANT)
		unknown_7322_ibits(dd, istat);
	if (istat & QIB_I_GPIO)
		unknown_7322_gpio_intr(dd);
	if (istat & QIB_I_C_ERROR) {
		qib_write_kreg(dd, kr_errmask, 0ULL);
		tasklet_schedule(&dd->error_tasklet);
	}
	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
		handle_7322_p_errors(dd->rcd[0]->ppd);
	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
		handle_7322_p_errors(dd->rcd[1]->ppd);
}

/*
 * Dynamically adjust the rcv int timeout for a context based on incoming
 * packet rate.
 */
static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
{
	struct qib_devdata *dd = rcd->dd;
	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];

	/*
	 * Dynamically adjust idle timeout on chip
	 * based on number of packets processed.
	 */
	if (npkts < rcv_int_count && timeout > 2)
		timeout >>= 1;
	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
		timeout = min(timeout << 1, rcv_int_timeout);
	else
		return;

	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
}

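/*
 * adjust_rcv_timeout() above is a simple doubling/halving adaptation of
 * the interrupt-coalescing timeout, clamped to [2, rcv_int_timeout]: a
 * quiet context gets a shorter timeout (lower latency), a busy one a
 * longer timeout (fewer interrupts per packet). Sketch with
 * hypothetical names (not compiled):
 */
#if 0
static unsigned demo_adapt(unsigned timeout, unsigned npkts,
			   unsigned thresh, unsigned max_timeout)
{
	if (npkts < thresh && timeout > 2)
		return timeout >> 1;	/* quiet: interrupt sooner */
	if (npkts >= thresh && timeout < max_timeout) {
		timeout <<= 1;		/* busy: batch more packets */
		return timeout > max_timeout ? max_timeout : timeout;
	}
	return timeout;			/* already at a bound */
}
#endif
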
/*
 * This is the main interrupt handler.
 * It will normally only be used for low frequency interrupts but may
 * have to handle all interrupts if INTx is enabled or fewer than normal
 * MSIx interrupts were allocated.
 * This routine should ignore the interrupt bits for any of the
 * dedicated MSIx handlers.
 */
static irqreturn_t qib_7322intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u64 istat;
	u64 ctxtrbits;
	u64 rmask;
	unsigned i;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg64(dd, kr_intstatus);

	if (unlikely(istat == ~0ULL)) {
		qib_bad_intrstatus(dd);
		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	istat &= dd->cspec->main_int_mask;
	if (unlikely(!istat)) {
		/* already handled, or shared and not us */
		ret = IRQ_NONE;
		goto bail;
	}

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* handle "errors" of various kinds first, device ahead of port */
	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
			      INT_MASK_P(Err, 1))))
		unlikely_7322_intr(dd, istat);

	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary. The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
	if (ctxtrbits) {
		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
			(1ULL << QIB_I_RCVURG_LSB);
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				if (dd->rcd[i])
					qib_kreceive(dd->rcd[i], NULL, &npkts);
			}
			rmask <<= 1;
		}
		if (ctxtrbits) {
			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
				(ctxtrbits >> QIB_I_RCVURG_LSB);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
		sdma_7322_intr(dd, istat);

	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}

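/*
 * The ctxtrbits walk above pairs one RcvAvail and one RcvUrg bit per
 * context and shifts the two-bit mask up by one each iteration. A
 * sketch of the same walk over a toy status word, assuming (purely as
 * an example) avail bits start at bit 0 and urg bits at bit 32; not
 * compiled:
 */
#if 0
static void demo_per_ctxt_bits(unsigned long long stat, unsigned nctxts,
			       void (*service)(unsigned ctxt))
{
	unsigned long long rmask = (1ULL << 0) | (1ULL << 32);
	unsigned i;

	for (i = 0; i < nctxts; i++, rmask <<= 1)
		if (stat & rmask)
			service(i);	/* either bit set for context i */
}
#endif
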
/*
 * Dedicated receive packet available interrupt handler.
 */
static irqreturn_t qib_7322pintr(int irq, void *data)
{
	struct qib_ctxtdata *rcd = data;
	struct qib_devdata *dd = rcd->dd;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);

	qib_kreceive(rcd, NULL, &npkts);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send buffer available interrupt handler.
 */
static irqreturn_t qib_7322bufavail(int irq, void *data)
{
	struct qib_devdata *dd = data;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);

	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
	if (dd->flags & QIB_INITTED)
		qib_ib_piobufavail(dd);
	else
		qib_wantpiobuf_7322_intr(dd, 0);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA interrupt handler.
 */
static irqreturn_t sdma_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA idle interrupt handler.
 */
static irqreturn_t sdma_idle_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA progress interrupt handler.
 */
static irqreturn_t sdma_progress_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaProgress, 1) :
		       INT_MASK_P(SDmaProgress, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA cleanup interrupt handler.
 */
static irqreturn_t sdma_cleanup_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_PM(SDmaCleanupDone, 1) :
		       INT_MASK_PM(SDmaCleanupDone, 0));
	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);

	return IRQ_HANDLED;
}

/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 * If we are using MSIx interrupts, we may fall back to
 * INTx later, if the interrupt handler doesn't get called
 * within 1/2 second (see verify_interrupt()).
 */
static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
{
	int ret, i, msixnum;
	u64 redirect[6];
	u64 mask;

	if (!dd->num_pports)
		return;

	if (clearpend) {
		/*
		 * if not switching interrupt types, be sure interrupts are
		 * disabled, and then clear anything pending at this point,
		 * because we are starting clean.
		 */
		qib_7322_set_intr_state(dd, 0);

		/* clear the reset error, init error/hwerror mask */
		qib_7322_init_hwerrors(dd);

		/* clear any interrupt bits that might be set */
		qib_write_kreg(dd, kr_intclear, ~0ULL);

		/* make sure no pending MSIx intr, and clear diag reg */
		qib_write_kreg(dd, kr_intgranted, ~0ULL);
		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
	}

	if (!dd->cspec->num_msix_entries) {
		/* Try to get INTx interrupt */
try_intx:
		if (!dd->pcidev->irq) {
			qib_dev_err(dd, "irq is 0, BIOS error? "
				    "Interrupts won't work\n");
			goto bail;
		}
		ret = request_irq(dd->pcidev->irq, qib_7322intr,
				  IRQF_SHARED, QIB_DRV_NAME, dd);
		if (ret) {
			qib_dev_err(dd, "Couldn't setup INTx "
				    "interrupt (irq=%d): %d\n",
				    dd->pcidev->irq, ret);
			goto bail;
		}
		dd->cspec->irq = dd->pcidev->irq;
		dd->cspec->main_int_mask = ~0ULL;
		goto bail;
	}

	/* Try to get MSIx interrupts */
	memset(redirect, 0, sizeof(redirect));
	mask = ~0ULL;
	msixnum = 0;
	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
		irq_handler_t handler;
		const char *name;
		void *arg;
		u64 val;
		int lsb, reg, sh;

		if (i < ARRAY_SIZE(irq_table)) {
			if (irq_table[i].port) {
				/* skip if for a non-configured port */
				if (irq_table[i].port > dd->num_pports)
					continue;
				arg = dd->pport + irq_table[i].port - 1;
			} else
				arg = dd;
			lsb = irq_table[i].lsb;
			handler = irq_table[i].handler;
			name = irq_table[i].name;
		} else {
			unsigned ctxt;

			ctxt = i - ARRAY_SIZE(irq_table);
			/* per krcvq context receive interrupt */
			arg = dd->rcd[ctxt];
			if (!arg)
				continue;
			if (qib_krcvq01_no_msi && ctxt < 2)
				continue;
			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
			handler = qib_7322pintr;
			name = QIB_DRV_NAME " (kctx)";
		}
		ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
				  handler, 0, name, arg);
		if (ret) {
			/*
			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to setup here.
			 */
			qib_dev_err(dd, "Couldn't setup MSIx "
				    "interrupt (vec=%d, irq=%d): %d\n", msixnum,
				    dd->cspec->msix_entries[msixnum].vector,
				    ret);
			qib_7322_nomsix(dd);
			goto try_intx;
		}
		dd->cspec->msix_arg[msixnum] = arg;
		if (lsb >= 0) {
			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
				SYM_LSB(IntRedirect0, vec1);
			mask &= ~(1ULL << lsb);
			redirect[reg] |= ((u64) msixnum) << sh;
		}
		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
				      (QIB_7322_MsixTable_OFFS / sizeof(u64)));
		msixnum++;
	}
	/* Initialize the vector mapping */
	for (i = 0; i < ARRAY_SIZE(redirect); i++)
		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
	dd->cspec->main_int_mask = mask;
	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
		     (unsigned long)dd);
bail:;
}

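/*
 * The redirect[] packing above maps an interrupt-source bit (lsb) to
 * an MSI-X vector number: each 64-bit redirect register holds
 * IBA7322_REDIRECT_VEC_PER_REG small fields, so the register index is
 * lsb / per_reg and the bit offset is (lsb % per_reg) * field_width.
 * Sketch with purely illustrative field geometry (12 fields of 5 bits;
 * the real constants come from the register definitions); not compiled:
 */
#if 0
static void demo_redirect(unsigned long long *redirect, int lsb,
			  unsigned vecnum)
{
	const int per_reg = 12;		/* fields per register, illustrative */
	const int width = 5;		/* bits per field, illustrative */
	int reg = lsb / per_reg;
	int sh = (lsb % per_reg) * width;

	redirect[reg] |= (unsigned long long)vecnum << sh;
}
#endif
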
/**
 * qib_7322_boardname - fill in the board name and note features
 * @dd: the qlogic_ib device
 *
 * info will be based on the board revision register
 */
static unsigned qib_7322_boardname(struct qib_devdata *dd)
{
	/* Will need enumeration of board-types here */
	char *n;
	u32 boardid, namelen;
	unsigned features = DUAL_PORT_CAP;

	boardid = SYM_FIELD(dd->revision, Revision, BoardID);

	switch (boardid) {
	case 0:
		n = "InfiniPath_QLE7342_Emulation";
		break;
	case 1:
		n = "InfiniPath_QLE7340";
		dd->flags |= QIB_HAS_QSFP;
		features = PORT_SPD_CAP;
		break;
	case 2:
		n = "InfiniPath_QLE7342";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 3:
		n = "InfiniPath_QMI7342";
		break;
	case 4:
		n = "InfiniPath_Unsupported7342";
		qib_dev_err(dd, "Unsupported version of QMH7342\n");
		features = 0;
		break;
	case BOARD_QMH7342:
		n = "InfiniPath_QMH7342";
		features = 0x24;
		break;
	case BOARD_QME7342:
		n = "InfiniPath_QME7342";
		break;
	case 8:
		n = "InfiniPath_QME7362";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 15:
		n = "InfiniPath_QLE7342_TEST";
		dd->flags |= QIB_HAS_QSFP;
		break;
	default:
		n = "InfiniPath_QLE73xy_UNKNOWN";
		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
		break;
	}
	dd->board_atten = 1; /* index into txdds_Xdr */

	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));

	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
		qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
			    " by module parameter\n", dd->unit);
		features &= PORT_SPD_CAP;
	}

	return features;
}

/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context.
 */
static int qib_do_7322_reset(struct qib_devdata *dd)
{
	u64 val;
	u64 *msix_vecsave;
	int i, msix_entries, ret = 1;
	u16 cmdval;
	u8 int_line, clinesz;
	unsigned long flags;

	/* Use dev_err so it shows up in logs, etc. */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);

	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);

	msix_entries = dd->cspec->num_msix_entries;

	/* no interrupts till re-initted */
	qib_7322_set_intr_state(dd, 0);

	if (msix_entries) {
		qib_7322_nomsix(dd);
		/* can be up to 512 bytes, too big for stack */
		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
				       sizeof(u64), GFP_KERNEL);
		if (!msix_vecsave)
			qib_dev_err(dd, "No mem to save MSIx data\n");
	} else
		msix_vecsave = NULL;

	/*
	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
	 * info that is set up by the BIOS, so we have to save and restore
	 * it ourselves.  There is some risk something could change it,
	 * after we save it, but since we have disabled the MSIx, it
	 * shouldn't be touched...
	 */
	for (i = 0; i < msix_entries; i++) {
		u64 vecaddr, vecdata;

		vecaddr = qib_read_kreg64(dd, 2 * i +
					  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
					  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
		if (msix_vecsave) {
			msix_vecsave[2 * i] = vecaddr;
			/* save it without the masked bit set */
			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
		}
	}

	dd->pport->cpspec->ibdeltainprog = 0;
	dd->pport->cpspec->ibsymdelta = 0;
	dd->pport->cpspec->iblnkerrdelta = 0;
	dd->pport->cpspec->ibmalfdelta = 0;
	dd->int_counter = 0; /* so we check interrupts work again */

	/*
	 * Keep chip from being accessed until we are ready.  Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
	 */
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
	dd->flags |= QIB_DOING_RESET;
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);

	for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST, etc. to complete; longer on each retry.
		 * We sometimes get machine checks from bus timeout if no
		 * response, so for now, make it *really* long.
		 */
		msleep(1000 + (1 + i) * 3000);

		qib_pcie_reenable(dd, cmdval, int_line, clinesz);

		/*
		 * Use readq directly, so we don't need to mark it as PRESENT
		 * until we get a successful indication that all is well.
		 */
		val = readq(&dd->kregbase[kr_revision]);
		if (val == dd->revision)
			break;
		if (i == 5) {
			qib_dev_err(dd, "Failed to initialize after reset, "
				    "unusable\n");
			ret = 0;
			goto bail;
		}
	}

	dd->flags |= QIB_PRESENT; /* it's back */

	if (msix_entries) {
		/* restore the MSIx vector address and data if saved above */
		for (i = 0; i < msix_entries; i++) {
			dd->cspec->msix_entries[i].entry = i;
			if (!msix_vecsave || !msix_vecsave[2 * i])
				continue;
			qib_write_kreg(dd, 2 * i +
				       (QIB_7322_MsixTable_OFFS / sizeof(u64)),
				       msix_vecsave[2 * i]);
			qib_write_kreg(dd, 1 + 2 * i +
				       (QIB_7322_MsixTable_OFFS / sizeof(u64)),
				       msix_vecsave[1 + 2 * i]);
		}
	}

	/* initialize the remaining registers. */
	for (i = 0; i < dd->num_pports; ++i)
		write_7322_init_portregs(&dd->pport[i]);
	write_7322_initregs(dd);

	if (qib_pcie_params(dd, dd->lbus_width,
			    &dd->cspec->num_msix_entries,
			    dd->cspec->msix_entries))
		qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
			    "continuing anyway\n");

	qib_setup_7322_interrupt(dd, 1);

	for (i = 0; i < dd->num_pports; ++i) {
		struct qib_pportdata *ppd = &dd->pport[i];

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

bail:
	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
	kfree(msix_vecsave);
	return ret;
}

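/*
 * The MSI-X table indexing above treats the table as pairs of 64-bit
 * words per vector: word 2*i is the message address, word 2*i+1 the
 * message data, with the bit the driver treats as the per-vector mask
 * (0x100000000ULL here) stripped before saving. Sketch with
 * hypothetical names; not compiled:
 */
#if 0
struct demo_msix_entry {
	unsigned long long addr;	/* word 2*i   */
	unsigned long long data;	/* word 2*i+1 */
};

static void demo_save(const unsigned long long *table, int nvec,
		      struct demo_msix_entry *save)
{
	int i;

	for (i = 0; i < nvec; i++) {
		save[i].addr = table[2 * i];
		save[i].data = table[2 * i + 1] & ~0x100000000ULL;
	}
}
#endif
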
/**
 * qib_7322_put_tid - write a TID to the chip
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
 * @pa: physical address of in memory buffer; tidinvalid if freeing
 */
static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	if (!(dd->flags & QIB_PRESENT))
		return;
	if (pa != dd->tidinvalid) {
		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;

		/* paranoia checks */
		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
				    pa);
			return;
		}
		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
			qib_dev_err(dd, "Physical page address 0x%lx "
				    "larger than supported\n", pa);
			return;
		}
		if (type == RCVHQ_RCV_TYPE_EAGER)
			chippa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			chippa |= IBA7322_TID_SZ_4K;
		pa = chippa;
	}
	writeq(pa, tidptr);
	mmiowb();
}

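/*
 * The TID word packing above stores the physical address shifted right
 * by IBA7322_TID_PA_SHIFT (so the address must be aligned to that
 * granularity), with the buffer size/type code OR'd into the result.
 * Sketch of the alignment check with an illustrative 11-bit shift
 * (2KB); not compiled:
 */
#if 0
static int demo_pack_tid(unsigned long pa, unsigned long long *out)
{
	const int pa_shift = 11;	/* 2KB alignment, illustrative */
	unsigned long long chippa = pa >> pa_shift;

	if (pa != (chippa << pa_shift))
		return -1;	/* low bits set: not 2KB aligned */
	*out = chippa;		/* caller ORs in the size/type code */
	return 0;
}
#endif
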
/**
 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @rcd: the ctxt
 *
 * clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close().
 */
static void qib_7322_clear_tids(struct qib_devdata *dd,
				struct qib_ctxtdata *rcd)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	u32 ctxt;
	int i;

	if (!dd->kregbase || !rcd)
		return;

	ctxt = rcd->ctxt;

	tidinv = dd->tidinvalid;
	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->rcvtidcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				 tidinv);

	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));

	for (i = 0; i < rcd->rcvegrcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				 tidinv);
}

/**
 * qib_7322_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void qib_7322_tidtemplate(struct qib_devdata *dd)
{
	/*
	 * For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets.  We may want a module parameter to
	 * specify 2KB or 4KB and/or make it per port instead of per device
	 * for those who want to reduce memory footprint.  Note that the
	 * rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
	if (dd->rcvegrbufsize == 2048)
		dd->tidtemplate = IBA7322_TID_SZ_2K;
	else if (dd->rcvegrbufsize == 4096)
		dd->tidtemplate = IBA7322_TID_SZ_4K;
	dd->tidinvalid = 0;
}

/**
 * qib_7322_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
				  struct qib_base_info *kinfo)
{
	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;

	if (rcd->dd->cspec->r1)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;

	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;

	return 0;
}

static struct qib_message_header *
qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = qib_hdrget_offset(rhf_addr);

	return (struct qib_message_header *)
		(rhf_addr - dd->rhf_offset + offset);
}

/*
 * Configure number of contexts.
 */
static void qib_7322_config_ctxts(struct qib_devdata *dd)
{
	unsigned long flags;
	u32 nchipctxts;

	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
	dd->cspec->numctxts = nchipctxts;
	if (qib_n_krcv_queues > 1 && dd->num_pports) {
		dd->first_user_ctxt = NUM_IB_PORTS +
			(qib_n_krcv_queues - 1) * dd->num_pports;
		if (dd->first_user_ctxt > nchipctxts)
			dd->first_user_ctxt = nchipctxts;
		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
	} else {
		dd->first_user_ctxt = NUM_IB_PORTS;
		dd->n_krcv_queues = 1;
	}

	if (!qib_cfgctxts) {
		int nctxts = dd->first_user_ctxt + num_online_cpus();

		if (nctxts <= 6)
			dd->ctxtcnt = 6;
		else if (nctxts <= 10)
			dd->ctxtcnt = 10;
		else if (nctxts <= nchipctxts)
			dd->ctxtcnt = nchipctxts;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->ctxtcnt = dd->num_pports;
	else if (qib_cfgctxts <= nchipctxts)
		dd->ctxtcnt = qib_cfgctxts;
	if (!dd->ctxtcnt) /* none of the above, set to max */
		dd->ctxtcnt = nchipctxts;

	/*
	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
	 * Lock to be paranoid about later motion, etc.
	 */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	if (dd->ctxtcnt > 10)
		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
	else if (dd->ctxtcnt > 6)
		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
	/* else configure for default 6 receive ctxts */

	/* The XRC opcode is 5. */
	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);

	/*
	 * RcvCtrl *must* be written here so that the
	 * chip understands how to change rcvegrcnt below.
	 */
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* kr_rcvegrcnt changes based on the number of contexts enabled */
	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
	if (qib_rcvhdrcnt)
		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
	else
		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
					dd->num_pports > 1 ? 1024U : 2048U);
}

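/*
 * The ContextCfg encoding above maps the chosen context count to the
 * field the chip understands: 6 contexts -> 0, 10 -> 1, 18 -> 2.
 * Sketch (hypothetical name; not compiled):
 */
#if 0
static unsigned demo_contextcfg(unsigned ctxtcnt)
{
	if (ctxtcnt > 10)
		return 2;	/* 18-context mode */
	if (ctxtcnt > 6)
		return 1;	/* 10-context mode */
	return 0;		/* default 6-context mode */
}
#endif
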
static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
{
	int lsb, ret = 0;
	u64 maskr; /* right-justified mask */

	switch (which) {
	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
		ret = ppd->link_width_enabled;
		goto done;

	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
		ret = ppd->link_width_active;
		goto done;

	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
		ret = ppd->link_speed_enabled;
		goto done;

	case QIB_IB_CFG_SPD: /* Get current Link spd */
		ret = ppd->link_speed_active;
		goto done;

	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;

	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;

	case QIB_IB_CFG_LINKLATENCY:
		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
		goto done;

	case QIB_IB_CFG_OP_VLS:
		ret = ppd->vls_operational;
		goto done;

	case QIB_IB_CFG_VL_HIGH_CAP:
		ret = 16;
		goto done;

	case QIB_IB_CFG_VL_LOW_CAP:
		ret = 16;
		goto done;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				OverrunThreshold);
		goto done;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				PhyerrThreshold);
		goto done;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		ret = (ppd->cpspec->ibcctrl_a &
		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
		goto done;

	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PMA_TICKS:
		/*
		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
		 */
		if (ppd->link_speed_active == QIB_IB_QDR)
			ret = 3;
		else if (ppd->link_speed_active == QIB_IB_DDR)
			ret = 1;
		else
			ret = 0;
		goto done;

	default:
		ret = -EINVAL;
		goto done;
	}
	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
done:
	return ret;
}

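/*
 * Cases that fall through to the final line above share one pattern:
 * select a field's low bit (lsb) and right-justified mask (maskr) in
 * the switch, then extract it as (reg >> lsb) & maskr. Sketch
 * (hypothetical name; not compiled):
 */
#if 0
static unsigned demo_get_field(unsigned long long reg, int lsb,
			       unsigned long long maskr)
{
	return (unsigned)((reg >> lsb) & maskr);	/* right-justified */
}
#endif
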
  3278. /*
  3279. * Below again cribbed liberally from older version. Do not lean
  3280. * heavily on it.
  3281. */
  3282. #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
  3283. #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
  3284. | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))

static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
{
	struct qib_devdata *dd = ppd->dd;
	u64 maskr; /* right-justified mask */
	int lsb, ret = 0;
	u16 lcmd, licmd;
	unsigned long flags;

	switch (which) {
	case QIB_IB_CFG_LIDLMC:
		/*
		 * Set LID and LMC. Combined to avoid possible hazard
		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
		 */
		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
		maskr = IBA7322_IBC_DLIDLMC_MASK;
		/*
		 * For header-checking, the SLID in the packet will
		 * be masked with SendIBSLMCMask, and compared
		 * with SendIBSLIDAssignMask. Make sure we do not
		 * set any bits not covered by the mask, or we get
		 * false-positives.
		 */
		qib_write_kreg_port(ppd, krp_sendslid,
				    val & (val >> 16) & SendIBSLIDAssignMask);
		qib_write_kreg_port(ppd, krp_sendslidmask,
				    (val >> 16) & SendIBSLMCMask);
		break;

	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val;
		/* convert IB value to chip register value */
		if (val == IB_WIDTH_1X)
			val = 0;
		else if (val == IB_WIDTH_4X)
			val = 1;
		else
			val = 3;
		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
		break;

	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
		/*
		 * As with width, only write the actual register if the
		 * link is currently down, otherwise takes effect on next
		 * link change. Since setting is being explicitly requested
		 * (via MAD or sysfs), clear autoneg failure status if speed
		 * autoneg is enabled.
		 */
		ppd->link_speed_enabled = val;
		val <<= IBA7322_IBC_SPEED_LSB;
		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
			IBA7322_IBC_MAX_SPEED_MASK;
		if (val & (val - 1)) {
			/* Multiple speeds enabled */
			val |= IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		} else if (val & IBA7322_IBC_SPEED_QDR)
			val |= IBA7322_IBC_IBTA_1_2_MASK;
		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
		break;

	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;

	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  OverrunThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  PhyerrThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;

	case QIB_IB_CFG_PKEYS: /* update pkeys */
		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
		goto bail;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		if (val == IB_LINKINITCMD_POLL)
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		else /* SLEEP */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;

	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
		/*
		 * Update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 * Set even if it's unchanged, print debug message only
		 * on changes.
		 */
		val = (ppd->ibmaxlen >> 2) + 1;
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
		ppd->cpspec->ibcctrl_a |= (u64)val <<
			SYM_LSB(IBCCtrlA_0, MaxPktLen);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;

	case QIB_IB_CFG_LSTATE: /* set the IB link state */
		switch (val & 0xffff0000) {
		case IB_LINKCMD_DOWN:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
			ppd->cpspec->ibmalfusesnap = 1;
			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
				crp_errlink);
			if (!ppd->cpspec->ibdeltainprog &&
			    qib_compat_ddr_negotiate) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap =
					read_7322_creg32_port(ppd,
							      crp_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap =
					read_7322_creg32_port(ppd,
						      crp_iblinkerrrecov);
			}
			break;

		case IB_LINKCMD_ARMED:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
			if (ppd->cpspec->ibmalfusesnap) {
				ppd->cpspec->ibmalfusesnap = 0;
				ppd->cpspec->ibmalfdelta +=
					read_7322_creg32_port(ppd,
							      crp_errlink) -
					ppd->cpspec->ibmalfsnap;
			}
			break;

		case IB_LINKCMD_ACTIVE:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
			goto bail;
		}
		switch (val & 0xffff) {
		case IB_LINKINITCMD_NOP:
			licmd = 0;
			break;

		case IB_LINKINITCMD_POLL:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
			break;

		case IB_LINKINITCMD_SLEEP:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
			break;

		case IB_LINKINITCMD_DISABLE:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
			ppd->cpspec->chase_end = 0;
			/*
			 * stop state chase counter and timer, if running.
			 * wait for pending timer, but don't clear .data (ppd)!
			 */
			if (ppd->cpspec->chase_timer.expires) {
				del_timer_sync(&ppd->cpspec->chase_timer);
				ppd->cpspec->chase_timer.expires = 0;
			}
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
				    val & 0xffff);
			goto bail;
		}
		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
		goto bail;

	case QIB_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			set_vls(ppd);
		}
		goto bail;

	case QIB_IB_CFG_VL_HIGH_LIMIT:
		qib_write_kreg_port(ppd, krp_highprio_limit, val);
		goto bail;

	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
		if (val > 3) {
			ret = -EINVAL;
			goto bail;
		}
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PORT:
		/* val is the port number of the switch we are connected to. */
		if (ppd->dd->cspec->r1) {
			cancel_delayed_work(&ppd->cpspec->ipg_work);
			ppd->cpspec->ipg_tries = 0;
		}
		goto bail;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(dd, kr_scratch, 0);
bail:
	return ret;
}
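
/*
 * Illustrative sketch (not part of the driver): per the comment in the
 * QIB_IB_CFG_LIDLMC case above, the caller packs the LMC-derived mask into
 * the upper 16 bits of val and the LID into the lower 16, e.g.:
 *
 *	u32 val = (lmc_mask << 16) | lid;	// lmc_mask, lid: caller's
 *	qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC, val);
 *
 * so the SLID-assign and SLMC header-check registers can be derived from
 * the same word.
 */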

static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
{
	int ret = 0;
	u64 val, ctrlb;

	/* only IBC loopback, may add serdes and xgxs loopbacks later */
	if (!strncmp(what, "ibc", 3)) {
		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
						   Loopback);
		val = 0; /* disable heart beat, so link will come up */
		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
			    ppd->dd->unit, ppd->port);
	} else if (!strncmp(what, "off", 3)) {
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
						    Loopback);
		/* enable heart beat again */
		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
			    "(normal)\n", ppd->dd->unit, ppd->port);
	} else
		ret = -EINVAL;
	if (!ret) {
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
					     << IBA7322_IBC_HRTBT_LSB);
		ppd->cpspec->ibcctrl_b = ctrlb | val;
		qib_write_kreg_port(ppd, krp_ibcctrl_b,
				    ppd->cpspec->ibcctrl_b);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
	}
	return ret;
}
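
/*
 * Illustrative sketch (not part of the driver): the loopback control is
 * string-driven, so diagnostics code would invoke it as, e.g.:
 *
 *	qib_7322_set_loopback(ppd, "ibc");	// IBC-internal loopback
 *	qib_7322_set_loopback(ppd, "off");	// back to normal operation
 *
 * Anything other than "ibc" or "off" returns -EINVAL; note the heartbeat
 * is disabled while looped back so the link can come up.
 */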

static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u32 val = qib_read_kreg_port(ppd, regno);

		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
			SYM_RMASK(LowPriority0_0, VirtualLane);
		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
			SYM_RMASK(LowPriority0_0, Weight);
	}
}

static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u64 val;

		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
			SYM_LSB(LowPriority0_0, VirtualLane)) |
		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
			SYM_LSB(LowPriority0_0, Weight));
		qib_write_kreg_port(ppd, regno, val);
	}
	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
		struct qib_devdata *dd = ppd->dd;
		unsigned long flags;

		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	}
}
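
/*
 * Illustrative sketch (not part of the driver): each of the 16 consecutive
 * arbitration registers packs one {VL, weight} pair, so a caller fills an
 * array of 16 ib_vl_weight_elem entries and hands it over, e.g.:
 *
 *	struct ib_vl_weight_elem tbl[16] = { { .vl = 0, .weight = 1 } };
 *	set_vl_weights(ppd, krp_lowprio_0, tbl);
 *
 * The trailing block also turns on the VL arbiter the first time a table
 * is written for the port.
 */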

static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	switch (which) {
	case QIB_IB_TBL_VL_HIGH_ARB:
		get_vl_weights(ppd, krp_highprio_0, t);
		break;

	case QIB_IB_TBL_VL_LOW_ARB:
		get_vl_weights(ppd, krp_lowprio_0, t);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	switch (which) {
	case QIB_IB_TBL_VL_HIGH_ARB:
		set_vl_weights(ppd, krp_highprio_0, t);
		break;

	case QIB_IB_TBL_VL_LOW_ARB:
		set_vl_weights(ppd, krp_lowprio_0, t);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	/*
	 * Need to write timeout register before updating rcvhdrhead to ensure
	 * that the timer is enabled on reception of a packet.
	 */
	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
	mmiowb();
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	mmiowb();
}

static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
{
	u32 head, tail;

	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	if (rcd->rcvhdrtail_kvaddr)
		tail = qib_get_rcvhdrtail(rcd);
	else
		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
	return head == tail;
}
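
/*
 * Illustrative sketch (not part of the driver): the receive header queue
 * is empty exactly when the software head index has caught up with the
 * hardware (or DMA'd) tail, so interrupt-mitigation paths can poll it
 * cheaply:
 *
 *	if (qib_7322_hdrqempty(rcd))
 *		;	// nothing pending for this context
 */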

#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
			     QIB_RCVCTRL_CTXT_DIS | \
			     QIB_RCVCTRL_TIDFLOW_ENB | \
			     QIB_RCVCTRL_TIDFLOW_DIS | \
			     QIB_RCVCTRL_TAILUPD_ENB | \
			     QIB_RCVCTRL_TAILUPD_DIS | \
			     QIB_RCVCTRL_INTRAVAIL_ENB | \
			     QIB_RCVCTRL_INTRAVAIL_DIS | \
			     QIB_RCVCTRL_BP_ENB | \
			     QIB_RCVCTRL_BP_DIS)

#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
			   QIB_RCVCTRL_CTXT_DIS | \
			   QIB_RCVCTRL_PKEY_DIS | \
			   QIB_RCVCTRL_PKEY_ENB)

/*
 * Modify the RCVCTRL register in chip-specific way. This
 * is a function because bit positions and (future) register
 * location is chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	u64 mask, val;
	unsigned long flags;

	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);

	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
	if (op & QIB_RCVCTRL_PKEY_ENB)
		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
	if (ctxt < 0) {
		mask = (1ULL << dd->ctxtcnt) - 1;
		rcd = NULL;
	} else {
		mask = (1ULL << ctxt);
		rcd = dd->rcd[ctxt];
	}
	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
		ppd->p_rcvctrl |=
			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
		}
		/* Write these registers before the context is enabled. */
		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
				    rcd->rcvhdrqtailaddr_phys);
		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
				    rcd->rcvhdrq_phys);
		rcd->seq_cnt = 1;
	}
	if (op & QIB_RCVCTRL_CTXT_DIS)
		ppd->p_rcvctrl &=
			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
	if (op & QIB_RCVCTRL_BP_ENB)
		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
	if (op & QIB_RCVCTRL_BP_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
	/*
	 * Decide which registers to write depending on the ops enabled.
	 * Special case is "flush" (no bits set at all)
	 * which needs to write both.
	 */
	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	if (op == 0 || (op & RCVCTRL_PORT_MODS))
		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
		/*
		 * Init the context registers also; if we were
		 * disabled, tail and head should both be zero
		 * already from the enable, but since we don't
		 * know, we have to do it explicitly.
		 */
		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);

		/* be sure enabling write seen; hd/tl should be 0 */
		(void) qib_read_kreg32(dd, kr_scratch);
		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
		dd->rcd[ctxt]->head = val;
		/* If kctxt, interrupt on next receive. */
		if (ctxt < dd->first_user_ctxt)
			val |= dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
		   dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
		/* arm rcv interrupt */
		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS) {
		unsigned f;

		/* Now that the context is disabled, clear these registers. */
		if (ctxt >= 0) {
			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
				qib_write_ureg(dd, ur_rcvflowtable + f,
					       TIDFLOW_ERRBITS, ctxt);
		} else {
			unsigned i;

			for (i = 0; i < dd->cfgctxts; i++) {
				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
						    i, 0);
				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
					qib_write_ureg(dd, ur_rcvflowtable + f,
						       TIDFLOW_ERRBITS, i);
			}
		}
	}
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}
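
/*
 * Illustrative sketch (not part of the driver): since <op> is a bit-mask,
 * callers can combine modifications in a single call, e.g. enabling a
 * context together with its "interrupt when packets available" bit:
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			 QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
 *
 * and a negative ctxt applies the change to all contexts at once, while
 * op == 0 acts as a "flush" that rewrites both shadow registers.
 */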

/*
 * Modify the SENDCTRL register in chip-specific way. This
 * is a function where there are multiple such registers with
 * slightly different layouts.
 * The chip doesn't allow back-to-back sendctrl writes, so write
 * the scratch register after writing sendctrl.
 *
 * Which register is written depends on the operation.
 * Most operate on the common register, while
 * SEND_ENB and SEND_DIS operate on the per-port ones.
 * SEND_ENB is included in common because it can change SPCL_TRIG
 */
#define SENDCTRL_COMMON_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_AVAIL_DIS | \
	QIB_SENDCTRL_AVAIL_ENB | \
	QIB_SENDCTRL_AVAIL_BLIP | \
	QIB_SENDCTRL_DISARM | \
	QIB_SENDCTRL_DISARM_ALL | \
	QIB_SENDCTRL_SEND_ENB)

#define SENDCTRL_PORT_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_SEND_ENB | \
	QIB_SENDCTRL_SEND_DIS | \
	QIB_SENDCTRL_FLUSH)

static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 tmp_dd_sendctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	/* First the dd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_CLEAR)
		dd->sendctrl = 0;
	if (op & QIB_SENDCTRL_AVAIL_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
		if (dd->flags & QIB_USE_SPCL_TRIG)
			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
	}

	/* Then the ppd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_SEND_DIS)
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
	else if (op & QIB_SENDCTRL_SEND_ENB)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);

	if (op & QIB_SENDCTRL_DISARM_ALL) {
		u32 i, last;

		tmp_dd_sendctrl = dd->sendctrl;
		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
		/*
		 * Disarm any buffers that are not yet launched,
		 * disabling updates until done.
		 */
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
		for (i = 0; i < last; i++) {
			qib_write_kreg(dd, kr_sendctrl,
				       tmp_dd_sendctrl |
				       SYM_MASK(SendCtrl, Disarm) | i);
			qib_write_kreg(dd, kr_scratch, 0);
		}
	}

	if (op & QIB_SENDCTRL_FLUSH) {
		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;

		/*
		 * Now drain all the fifos. The Abort bit should never be
		 * needed, so for now, at least, we don't use it.
		 */
		tmp_ppd_sendctrl |=
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
			SYM_MASK(SendCtrl_0, TxeBypassIbc);
		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	tmp_dd_sendctrl = dd->sendctrl;

	if (op & QIB_SENDCTRL_DISARM)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
			 SYM_LSB(SendCtrl, DisarmSendBuf));
	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);

	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	if (op & QIB_SENDCTRL_FLUSH) {
		u32 v;

		/*
		 * ensure writes have hit chip, then do a few
		 * more reads, to allow DMA of pioavail registers
		 * to occur, so in-memory copy is in sync with
		 * the chip. Not always safe to sleep.
		 */
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		qib_read_kreg32(dd, kr_scratch);
	}
}
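
/*
 * Illustrative sketch (not part of the driver): the "blip" operation,
 *
 *	sendctrl_7322_mod(ppd, QIB_SENDCTRL_AVAIL_BLIP);
 *
 * works because the first common write above uses tmp_dd_sendctrl with
 * SendBufAvailUpd cleared, and the final write restores the unmodified
 * dd->sendctrl shadow, momentarily toggling the bit to force a fresh
 * pioavail update.
 */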

#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */

/**
 * qib_portcntr_7322 - read a per-port chip counter
 * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
 */
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
{
	struct qib_devdata *dd = ppd->dd;
	u64 ret = 0ULL;
	u16 creg;
	/* 0xffff for unimplemented or synthesized counters */
	static const u32 xlator[] = {
		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
		[QIBPORTCNTR_ERRLINK] = crp_errlink,
		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
		/*
		 * the next 3 aren't really counters, but were implemented
		 * as counters in older chips, so still get accessed as
		 * though they were counters from this code.
		 */
		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
		[QIBPORTCNTR_PSSTART] = krp_psstart,
		[QIBPORTCNTR_PSSTAT] = krp_psstat,
		/* pseudo-counter, summed for all ports */
		[QIBPORTCNTR_KHDROVFL] = 0xffff,
	};

	if (reg >= ARRAY_SIZE(xlator)) {
		qib_devinfo(ppd->dd->pcidev,
			    "Unimplemented portcounter %u\n", reg);
		goto done;
	}
	creg = xlator[reg] & _PORT_CNTR_IDXMASK;

	/* handle non-counters and special cases first */
	if (reg == QIBPORTCNTR_KHDROVFL) {
		int i;

		/* sum over all kernel contexts (skip if mini_init) */
		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
			struct qib_ctxtdata *rcd = dd->rcd[i];

			if (!rcd || rcd->ppd != ppd)
				continue;
			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
		}
		goto done;
	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
		/*
		 * Used as part of the synthesis of port_rcv_errors
		 * in the verbs code for IBTA counters. Not needed for 7322,
		 * because all the errors are already counted by other cntrs.
		 */
		goto done;
	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
		/* were counters in older chips, now per-port kernel regs */
		ret = qib_read_kreg_port(ppd, creg);
		goto done;
	}

	/*
	 * Only fast increment counters are 64 bits; use 32 bit reads to
	 * avoid two independent reads when on Opteron.
	 */
	if (xlator[reg] & _PORT_64BIT_FLAG)
		ret = read_7322_creg_port(ppd, creg);
	else
		ret = read_7322_creg32_port(ppd, creg);
	if (creg == crp_ibsymbolerr) {
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->ibsymsnap;
		ret -= ppd->cpspec->ibsymdelta;
	} else if (creg == crp_iblinkerrrecov) {
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->iblnkerrsnap;
		ret -= ppd->cpspec->iblnkerrdelta;
	} else if (creg == crp_errlink)
		ret -= ppd->cpspec->ibmalfdelta;
	else if (creg == crp_iblinkdown)
		ret += ppd->cpspec->iblnkdowndelta;
done:
	return ret;
}
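
/*
 * Illustrative sketch (not part of the driver): callers address counters
 * by the chip-independent QIBPORTCNTR_* index, not the chip register; for
 * example the faststats timer below builds a traffic total as:
 *
 *	u64 wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
 *		  qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
 */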

/*
 * Device counter names (not port-specific), one line per stat,
 * single string. Used by utilities like ipathstats to print the stats
 * in a way which works for different versions of drivers, without changing
 * the utility. Names need to be 12 chars or less (w/o newline), for proper
 * display by utility.
 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
 * The EgrOvfl list needs to be last so we truncate them at the configured
 * context count for the device.
 * cntr7322indices contains the corresponding register indices.
 */
static const char cntr7322names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n"
	"RxTIDInvalid\n"
	"RxTIDFloDrop\n" /* 7322 only */
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n"
	"Ctxt4EgrOvfl\n"
	"Ctxt5EgrOvfl\n"
	"Ctxt6EgrOvfl\n"
	"Ctxt7EgrOvfl\n"
	"Ctxt8EgrOvfl\n"
	"Ctxt9EgrOvfl\n"
	"Ctx10EgrOvfl\n"
	"Ctx11EgrOvfl\n"
	"Ctx12EgrOvfl\n"
	"Ctx13EgrOvfl\n"
	"Ctx14EgrOvfl\n"
	"Ctx15EgrOvfl\n"
	"Ctx16EgrOvfl\n"
	"Ctx17EgrOvfl\n"
	;

static const u32 cntr7322indices[] = {
	cr_lbint | _PORT_64BIT_FLAG,
	cr_lbstall | _PORT_64BIT_FLAG,
	cr_tidfull,
	cr_tidinvalid,
	cr_rxtidflowdrop,
	cr_base_egrovfl + 0,
	cr_base_egrovfl + 1,
	cr_base_egrovfl + 2,
	cr_base_egrovfl + 3,
	cr_base_egrovfl + 4,
	cr_base_egrovfl + 5,
	cr_base_egrovfl + 6,
	cr_base_egrovfl + 7,
	cr_base_egrovfl + 8,
	cr_base_egrovfl + 9,
	cr_base_egrovfl + 10,
	cr_base_egrovfl + 11,
	cr_base_egrovfl + 12,
	cr_base_egrovfl + 13,
	cr_base_egrovfl + 14,
	cr_base_egrovfl + 15,
	cr_base_egrovfl + 16,
	cr_base_egrovfl + 17,
};

/*
 * same as cntr7322names and cntr7322indices, but for port-specific counters.
 * portcntr7322indices is somewhat complicated by some registers needing
 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
 */
static const char portcntr7322names[] =
	"TxPkt\n"
	"TxFlowPkt\n"
	"TxWords\n"
	"RxPkt\n"
	"RxFlowPkt\n"
	"RxWords\n"
	"TxFlowStall\n"
	"TxDmaDesc\n"  /* 7220 and 7322-only */
	"E RxDlidFltr\n"  /* 7220 and 7322-only */
	"IBStatusChng\n"
	"IBLinkDown\n"
	"IBLnkRecov\n"
	"IBRxLinkErr\n"
	"IBSymbolErr\n"
	"RxLLIErr\n"
	"RxBadFormat\n"
	"RxBadLen\n"
	"RxBufOvrfl\n"
	"RxEBP\n"
	"RxFlowCtlErr\n"
	"RxICRCerr\n"
	"RxLPCRCerr\n"
	"RxVCRCerr\n"
	"RxInvalLen\n"
	"RxInvalPKey\n"
	"RxPktDropped\n"
	"TxBadLength\n"
	"TxDropped\n"
	"TxInvalLen\n"
	"TxUnderrun\n"
	"TxUnsupVL\n"
	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
	"RxVL15Drop\n"
	"RxVlErr\n"
	"XcessBufOvfl\n"
	"RxQPBadCtxt\n" /* 7322-only from here down */
	"TXBadHeader\n"
	;

static const u32 portcntr7322indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	crp_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	crp_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	crp_txsdmadesc | _PORT_64BIT_FLAG,
	crp_rxdlidfltr,
	crp_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	crp_rcvflowctrlviol,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	crp_txminmaxlenerr,
	crp_txdroppedpkt,
	crp_txlenerr,
	crp_txunderrun,
	crp_txunsupvl,
	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
	crp_rxqpinvalidctxt,
	crp_txhdrerr,
};

/* do all the setup to make the counter reads efficient later */
static void init_7322_cntrnames(struct qib_devdata *dd)
{
	int i, j = 0;
	char *s;

	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
	     i++) {
		/* we always have at least one counter before the egrovfl */
		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
			j = 1;
		s = strchr(s + 1, '\n');
		if (s && j)
			j++;
	}
	dd->cspec->ncntrs = i;
	if (!s)
		/* full list; size is without terminating null */
		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
	else
		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
		* sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	for (i = 0, s = (char *)portcntr7322names; s; i++)
		s = strchr(s + 1, '\n');
	dd->cspec->nportcntrs = i - 1;
	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
	for (i = 0; i < dd->num_pports; ++i) {
		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
			* sizeof(u64), GFP_KERNEL);
		if (!dd->pport[i].cpspec->portcntrs)
			qib_dev_err(dd, "Failed allocation for"
				    " portcounters\n");
	}
}

static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
			      u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->cntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *) cntr7322names;
	} else {
		u64 *cntr = dd->cspec->cntrs;
		int i;

		ret = dd->cspec->ncntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->ncntrs; i++)
			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg(dd,
							 cntr7322indices[i] &
							 _PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32(dd,
							   cntr7322indices[i]);
	}
done:
	return ret;
}

static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
				  char **namep, u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->portcntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *)portcntr7322names;
	} else {
		struct qib_pportdata *ppd = &dd->pport[port];
		u64 *cntr = ppd->cpspec->portcntrs;
		int i;

		ret = dd->cspec->nportcntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->nportcntrs; i++) {
			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
				*cntr++ = qib_portcntr_7322(ppd,
					portcntr7322indices[i] &
					_PORT_CNTR_IDXMASK);
			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg_port(ppd,
					portcntr7322indices[i] &
					_PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32_port(ppd,
					portcntr7322indices[i]);
		}
	}
done:
	return ret;
}
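
/*
 * Illustrative sketch (not part of the driver): these two readers serve a
 * two-phase stats protocol; with namep non-NULL they hand back the name
 * string, otherwise the value array, and a pos at or past the returned
 * length signals "done":
 *
 *	char *names;
 *	u64 *vals;
 *	u32 nlen = qib_read_7322portcntrs(dd, 0, port, &names, NULL);
 *	u32 vlen = qib_read_7322portcntrs(dd, 0, port, NULL, &vals);
 */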

/**
 * qib_get_7322_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * VESTIGIAL IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have, yet, for 7322-based boards.
 *
 * called from add_timer
 */
static void qib_get_7322_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd;
	unsigned long flags;
	u64 traffic_wds;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/*
		 * If the port isn't enabled or operational, or if diags
		 * is running (which can cause memory diags to fail),
		 * skip this port this time.
		 */
		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
		    || dd->diag_client)
			continue;

		/*
		 * Maintain an activity timer, based on traffic
		 * exceeding a threshold, so we need to check the word-counts
		 * even if they are 64-bit.
		 */
		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
		traffic_wds -= ppd->dd->traffic_wds;
		ppd->dd->traffic_wds += traffic_wds;
		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
						QIB_IB_QDR) &&
		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
				    QIBL_LINKACTIVE)) &&
		    ppd->cpspec->qdr_dfe_time &&
		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
			ppd->cpspec->qdr_dfe_on = 0;

			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_INIT_R1 :
					    QDR_STATIC_ADAPT_INIT);
			force_h1(ppd);
		}
	}
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}

/*
 * If we were using MSIx, try to fallback to INTx.
 */
static int qib_7322_intr_fallback(struct qib_devdata *dd)
{
	if (!dd->cspec->num_msix_entries)
		return 0; /* already using INTx */

	qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
		    " trying INTx interrupts\n");
	qib_7322_nomsix(dd);
	qib_enable_intx(dd->pcidev);
	qib_setup_7322_interrupt(dd, 0);
	return 1;
}

/*
 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining. To do this right, we reset IBC
 * as well, then return to previous state (which may be still in reset)
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
 * check all callers.
 */
static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
{
	u64 val;
	struct qib_devdata *dd = ppd->dd;
	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
		SYM_MASK(IBPCSConfig_0, xcv_treset) |
		SYM_MASK(IBPCSConfig_0, tx_rx_reset);

	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
	qib_write_kreg(dd, kr_hwerrmask,
		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
	qib_write_kreg_port(ppd, krp_ibcctrl_a,
			    ppd->cpspec->ibcctrl_a &
			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));

	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	qib_write_kreg(dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, statusValidNoEopClear));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
}

/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware. It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum, control, len;
	struct qib_devdata *dd = ppd->dd;

	i = 0;
	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
	control = qib_7322_setpbc_control(ppd, len, 0, 15);
	pbc = ((u64) control << 32) | len;
	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
		if (i++ > 15)
			return;
		udelay(2);
	}
	/* disable header check on this packet, since it can't be valid */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
	writeq(pbc, piobuf);
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, hdr, 7);
	qib_pio_copy(piobuf + 9, data, dcnt);
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_flush_wc();
	qib_sendbuf_done(dd, pnum);
	/* and re-enable hdr check */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
}

/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
	};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
	};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}

/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change. The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down)
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When the link has gone down and autoneg is enabled, or autoneg has
 * failed and we give up until next time, we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
 */
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	u64 newctrlb;

	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK);

	if (speed & (speed - 1)) /* multiple speeds */
		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK;
	else
		newctrlb |= speed == QIB_IB_QDR ?
			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
			((speed == QIB_IB_DDR ?
			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));

	if (newctrlb == ppd->cpspec->ibcctrl_b)
		return;

	ppd->cpspec->ibcctrl_b = newctrlb;
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}
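
/*
 * Illustrative note (not part of the driver): "speed & (speed - 1)" is the
 * usual power-of-two test; the QIB_IB_SDR/DDR/QDR speeds are single bits,
 * so the expression is nonzero only when more than one speed is enabled:
 *
 *	set_7322_ibspeed_fast(ppd, QIB_IB_SDR | QIB_IB_DDR); // multiple
 *	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);              // single
 */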

/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT
 */
static void try_7322_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	qib_autoneg_7322_send(ppd, 0);
	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
	qib_7322_mini_pcs_reset(ppd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}

/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = container_of(work, struct qib_chippport_specific,
			   autoneg_work.work)->ppd;
	dd = ppd->dd;

	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
		     == IB_7322_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
			   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			   msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			ppd->cpspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}

/*
 * This routine is used to request IPG set in the QLogic switch.
 * Only called if r1.
 */
static void try_7322_ipg(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	unsigned delay;
	int ret;

	agent = ibp->send_agent;
	if (!agent)
		goto retry;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		goto retry;

	if (!ibp->smi_ah) {
		struct ib_ah_attr attr;
		struct ib_ah *ah;

		memset(&attr, 0, sizeof attr);
		attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
		attr.port_num = ppd->port;
		ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
		if (IS_ERR(ah))
			ret = -EINVAL;
		else {
			send_buf->ah = ah;
			ibp->smi_ah = to_iah(ah);
			ret = 0;
		}
	} else {
		send_buf->ah = &ibp->smi_ah->ibah;
		ret = 0;
	}

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_SEND;
	smp->hop_cnt = 1;
	smp->attr_id = QIB_VENDOR_IPG;
	smp->attr_mod = 0;

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		ib_free_send_mad(send_buf);
retry:
	delay = 2 << ppd->cpspec->ipg_tries;
	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
			   msecs_to_jiffies(delay));
}

/*
 * Timeout handler for setting IPG.
 * Only called if r1.
 */
static void ipg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;

	ppd = container_of(work, struct qib_chippport_specific,
			   ipg_work.work)->ppd;
	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
	    && ++ppd->cpspec->ipg_tries <= 10)
		try_7322_ipg(ppd);
}

static u32 qib_7322_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);

	switch (state) {
	case IB_7322_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7322_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7322_L_STATE_ACTIVE:
		/* fall through */
	case IB_7322_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7322_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}
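
/*
 * Illustrative note (not part of the driver): the mapping above collapses
 * the chip link states onto IBTA port states, so an ibcstatus_a value
 * whose LinkState field is IB_7322_L_STATE_ACT_DEFER still reports
 * IB_PORT_ACTIVE:
 *
 *	u32 ibta_state = qib_7322_iblink_state(ibcs);
 */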

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7322_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
	return qib_7322_physportstate[state];
}
  4632. static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
  4633. {
  4634. int ret = 0, symadj = 0;
  4635. unsigned long flags;
  4636. int mult;
  4637. spin_lock_irqsave(&ppd->lflags_lock, flags);
  4638. ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
  4639. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  4640. /* Update our picture of width and speed from chip */
  4641. if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
  4642. ppd->link_speed_active = QIB_IB_QDR;
  4643. mult = 4;
  4644. } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
  4645. ppd->link_speed_active = QIB_IB_DDR;
  4646. mult = 2;
  4647. } else {
  4648. ppd->link_speed_active = QIB_IB_SDR;
  4649. mult = 1;
  4650. }
  4651. if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
  4652. ppd->link_width_active = IB_WIDTH_4X;
  4653. mult *= 4;
  4654. } else
  4655. ppd->link_width_active = IB_WIDTH_1X;
  4656. ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
  4657. if (!ibup) {
  4658. u64 clr;
  4659. /* Link went down. */
  4660. /* do IPG MAD again after linkdown, even if last time failed */
  4661. ppd->cpspec->ipg_tries = 0;
  4662. clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
  4663. (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
  4664. SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
  4665. if (clr)
  4666. qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
  4667. if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
  4668. QIBL_IB_AUTONEG_INPROG)))
  4669. set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
  4670. if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
  4671. struct qib_qsfp_data *qd =
  4672. &ppd->cpspec->qsfp_data;
  4673. /* unlock the Tx settings, speed may change */
  4674. qib_write_kreg_port(ppd, krp_tx_deemph_override,
  4675. SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
  4676. reset_tx_deemphasis_override));
  4677. qib_cancel_sends(ppd);
  4678. /* on link down, ensure sane pcs state */
  4679. qib_7322_mini_pcs_reset(ppd);
  4680. /* schedule the qsfp refresh which should turn the link
  4681. off */
  4682. if (ppd->dd->flags & QIB_HAS_QSFP) {
  4683. qd->t_insert = jiffies;
  4684. queue_work(ib_wq, &qd->work);
  4685. }
  4686. spin_lock_irqsave(&ppd->sdma_lock, flags);
  4687. if (__qib_sdma_running(ppd))
  4688. __qib_sdma_process_event(ppd,
  4689. qib_sdma_event_e70_go_idle);
  4690. spin_unlock_irqrestore(&ppd->sdma_lock, flags);
  4691. }
  4692. clr = read_7322_creg32_port(ppd, crp_iblinkdown);
  4693. if (clr == ppd->cpspec->iblnkdownsnap)
  4694. ppd->cpspec->iblnkdowndelta++;
  4695. } else {
  4696. if (qib_compat_ddr_negotiate &&
  4697. !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
  4698. QIBL_IB_AUTONEG_INPROG)) &&
  4699. ppd->link_speed_active == QIB_IB_SDR &&
  4700. (ppd->link_speed_enabled & QIB_IB_DDR)
  4701. && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
  4702. /* we are SDR, and auto-negotiation enabled */
  4703. ++ppd->cpspec->autoneg_tries;
  4704. if (!ppd->cpspec->ibdeltainprog) {
  4705. ppd->cpspec->ibdeltainprog = 1;
  4706. ppd->cpspec->ibsymdelta +=
  4707. read_7322_creg32_port(ppd,
  4708. crp_ibsymbolerr) -
  4709. ppd->cpspec->ibsymsnap;
  4710. ppd->cpspec->iblnkerrdelta +=
  4711. read_7322_creg32_port(ppd,
  4712. crp_iblinkerrrecov) -
  4713. ppd->cpspec->iblnkerrsnap;
  4714. }
  4715. try_7322_autoneg(ppd);
  4716. ret = 1; /* no other IB status change processing */
  4717. } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
  4718. ppd->link_speed_active == QIB_IB_SDR) {
  4719. qib_autoneg_7322_send(ppd, 1);
  4720. set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
  4721. qib_7322_mini_pcs_reset(ppd);
  4722. udelay(2);
  4723. ret = 1; /* no other IB status change processing */
  4724. } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
  4725. (ppd->link_speed_active & QIB_IB_DDR)) {
  4726. spin_lock_irqsave(&ppd->lflags_lock, flags);
  4727. ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
  4728. QIBL_IB_AUTONEG_FAILED);
  4729. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  4730. ppd->cpspec->autoneg_tries = 0;
  4731. /* re-enable SDR, for next link down */
  4732. set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
  4733. wake_up(&ppd->cpspec->autoneg_wait);
  4734. symadj = 1;
  4735. } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
  4736. /*
  4737. * Clear autoneg failure flag, and do setup
  4738. * so we'll try next time link goes down and
  4739. * back to INIT (possibly connected to a
  4740. * different device).
  4741. */
  4742. spin_lock_irqsave(&ppd->lflags_lock, flags);
  4743. ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
  4744. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  4745. ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
  4746. symadj = 1;
  4747. }
  4748. if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
  4749. symadj = 1;
  4750. if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
  4751. try_7322_ipg(ppd);
  4752. if (!ppd->cpspec->recovery_init)
  4753. setup_7322_link_recovery(ppd, 0);
  4754. ppd->cpspec->qdr_dfe_time = jiffies +
  4755. msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
  4756. }
  4757. ppd->cpspec->ibmalfusesnap = 0;
  4758. ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
  4759. crp_errlink);
  4760. }
  4761. if (symadj) {
  4762. ppd->cpspec->iblnkdownsnap =
  4763. read_7322_creg32_port(ppd, crp_iblinkdown);
  4764. if (ppd->cpspec->ibdeltainprog) {
  4765. ppd->cpspec->ibdeltainprog = 0;
  4766. ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
  4767. crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
  4768. ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
  4769. crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
  4770. }
  4771. } else if (!ibup && qib_compat_ddr_negotiate &&
  4772. !ppd->cpspec->ibdeltainprog &&
  4773. !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
  4774. ppd->cpspec->ibdeltainprog = 1;
  4775. ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
  4776. crp_ibsymbolerr);
  4777. ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
  4778. crp_iblinkerrrecov);
  4779. }
  4780. if (!ret)
  4781. qib_setup_7322_setextled(ppd, ibup);
  4782. return ret;
  4783. }
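/*
 * An illustrative, self-contained sketch (not driver code) of the
 * snapshot/delta bookkeeping used above for crp_ibsymbolerr and
 * crp_iblinkerrrecov: a snapshot is taken around a link transition, and
 * errors counted between then and the next stable point are folded into
 * a delta that reporting code can subtract, so users only see errors
 * from stable-link operation. All names below are hypothetical.
 */
#if 0
struct snap_acct {
	u32 snap;	/* counter value when the transition began */
	u32 delta;	/* accumulated count to hide from reports */
	int inprog;	/* nonzero between snapshot and fold */
};

static void snap_take(struct snap_acct *a, u32 now)
{
	if (!a->inprog) {
		a->inprog = 1;
		a->snap = now;	/* remember counter at link transition */
	}
}

static void snap_fold(struct snap_acct *a, u32 now)
{
	if (a->inprog) {
		a->inprog = 0;
		a->delta += now - a->snap;	/* u32 math wraps safely */
	}
}
/* A report would then show: raw_counter - a->delta. */
#endif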
  4784. /*
  4785. * Does read/modify/write to appropriate registers to
  4786. * set output and direction bits selected by mask.
4787. * These are in their canonical positions (e.g. the lsb of
4788. * dir will end up in D48 of extctrl on existing chips).
4789. * Returns the contents of the GP Inputs.
  4790. */
  4791. static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
  4792. {
  4793. u64 read_val, new_out;
  4794. unsigned long flags;
  4795. if (mask) {
  4796. /* some bits being written, lock access to GPIO */
  4797. dir &= mask;
  4798. out &= mask;
  4799. spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
  4800. dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
  4801. dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
  4802. new_out = (dd->cspec->gpio_out & ~mask) | out;
  4803. qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
  4804. qib_write_kreg(dd, kr_gpio_out, new_out);
  4805. dd->cspec->gpio_out = new_out;
  4806. spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
  4807. }
  4808. /*
  4809. * It is unlikely that a read at this time would get valid
  4810. * data on a pin whose direction line was set in the same
  4811. * call to this function. We include the read here because
  4812. * that allows us to potentially combine a change on one pin with
  4813. * a read on another, and because the old code did something like
  4814. * this.
  4815. */
  4816. read_val = qib_read_kreg64(dd, kr_extstatus);
  4817. return SYM_FIELD(read_val, EXTStatus, GPIOIn);
  4818. }
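/*
 * Call-pattern notes for gpio_7322_mod(): "mask" selects which GPIO bits
 * are written; with mask == 0 the call is a pure read of the GP inputs.
 * A sketch of the common patterns, with a hypothetical pin number:
 */
#if 0
static void gpio_mod_examples(struct qib_devdata *dd, unsigned pin_num)
{
	u32 in, pin = 1U << pin_num;

	in = gpio_7322_mod(dd, 0, 0, 0);   /* read-only; nothing written */
	gpio_7322_mod(dd, pin, pin, pin);  /* make pin an output, driven high */
	gpio_7322_mod(dd, 0, pin, pin);	   /* make pin an output, driven low */
	(void)in;
}
#endif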
  4819. /* Enable writes to config EEPROM, if possible. Returns previous state */
  4820. static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
  4821. {
  4822. int prev_wen;
  4823. u32 mask;
  4824. mask = 1 << QIB_EEPROM_WEN_NUM;
  4825. prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
  4826. gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
  4827. return prev_wen & 1;
  4828. }
  4829. /*
  4830. * Read fundamental info we need to use the chip. These are
  4831. * the registers that describe chip capabilities, and are
  4832. * saved in shadow registers.
  4833. */
  4834. static void get_7322_chip_params(struct qib_devdata *dd)
  4835. {
  4836. u64 val;
  4837. u32 piobufs;
  4838. int mtu;
  4839. dd->palign = qib_read_kreg32(dd, kr_pagealign);
  4840. dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
  4841. dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
  4842. dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
  4843. dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
  4844. dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
  4845. dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
  4846. val = qib_read_kreg64(dd, kr_sendpiobufcnt);
  4847. dd->piobcnt2k = val & ~0U;
  4848. dd->piobcnt4k = val >> 32;
  4849. val = qib_read_kreg64(dd, kr_sendpiosize);
  4850. dd->piosize2k = val & ~0U;
  4851. dd->piosize4k = val >> 32;
  4852. mtu = ib_mtu_enum_to_int(qib_ibmtu);
  4853. if (mtu == -1)
  4854. mtu = QIB_DEFAULT_MTU;
  4855. dd->pport[0].ibmtu = (u32)mtu;
  4856. dd->pport[1].ibmtu = (u32)mtu;
  4857. /* these may be adjusted in init_chip_wc_pat() */
  4858. dd->pio2kbase = (u32 __iomem *)
  4859. ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
  4860. dd->pio4kbase = (u32 __iomem *)
  4861. ((char __iomem *) dd->kregbase +
  4862. (dd->piobufbase >> 32));
  4863. /*
  4864. * 4K buffers take 2 pages; we use roundup just to be
  4865. * paranoid; we calculate it once here, rather than on
4866. * every buffer allocation
  4867. */
  4868. dd->align4k = ALIGN(dd->piosize4k, dd->palign);
  4869. piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
  4870. dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
  4871. (sizeof(u64) * BITS_PER_BYTE / 2);
  4872. }
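/*
 * Worked example for the pioavregs math above (hypothetical counts):
 * each 64-bit pioavail register carries 2 status bits per send buffer,
 * so one register covers sizeof(u64) * BITS_PER_BYTE / 2 = 32 buffers.
 * With piobcnt2k = 128, piobcnt4k = 32 and NUM_VL15_BUFS = 14,
 * piobufs = 174 and pioavregs = ALIGN(174, 32) / 32 = 192 / 32 = 6.
 */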
  4873. /*
  4874. * The chip base addresses in cspec and cpspec have to be set
  4875. * after possible init_chip_wc_pat(), rather than in
  4876. * get_7322_chip_params(), so split out as separate function
4877. * get_7322_chip_params(), so it is split out as a separate function.
  4878. static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
  4879. {
  4880. u32 cregbase;
  4881. cregbase = qib_read_kreg32(dd, kr_counterregbase);
  4882. dd->cspec->cregbase = (u64 __iomem *)(cregbase +
  4883. (char __iomem *)dd->kregbase);
  4884. dd->egrtidbase = (u64 __iomem *)
  4885. ((char __iomem *) dd->kregbase + dd->rcvegrbase);
  4886. /* port registers are defined as relative to base of chip */
  4887. dd->pport[0].cpspec->kpregbase =
  4888. (u64 __iomem *)((char __iomem *)dd->kregbase);
  4889. dd->pport[1].cpspec->kpregbase =
  4890. (u64 __iomem *)(dd->palign +
  4891. (char __iomem *)dd->kregbase);
  4892. dd->pport[0].cpspec->cpregbase =
  4893. (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
  4894. kr_counterregbase) + (char __iomem *)dd->kregbase);
  4895. dd->pport[1].cpspec->cpregbase =
  4896. (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
  4897. kr_counterregbase) + (char __iomem *)dd->kregbase);
  4898. }
  4899. /*
  4900. * This is a fairly special-purpose observer, so we only support
  4901. * the port-specific parts of SendCtrl
  4902. */
  4903. #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
  4904. SYM_MASK(SendCtrl_0, SDmaEnable) | \
  4905. SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
  4906. SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
  4907. SYM_MASK(SendCtrl_0, SDmaHalt) | \
  4908. SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
  4909. SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
  4910. static int sendctrl_hook(struct qib_devdata *dd,
  4911. const struct diag_observer *op, u32 offs,
  4912. u64 *data, u64 mask, int only_32)
  4913. {
  4914. unsigned long flags;
  4915. unsigned idx;
  4916. unsigned pidx;
  4917. struct qib_pportdata *ppd = NULL;
  4918. u64 local_data, all_bits;
  4919. /*
  4920. * The fixed correspondence between Physical ports and pports is
  4921. * severed. We need to hunt for the ppd that corresponds
  4922. * to the offset we got. And we have to do that without admitting
  4923. * we know the stride, apparently.
  4924. */
  4925. for (pidx = 0; pidx < dd->num_pports; ++pidx) {
  4926. u64 __iomem *psptr;
  4927. u32 psoffs;
  4928. ppd = dd->pport + pidx;
  4929. if (!ppd->cpspec->kpregbase)
  4930. continue;
  4931. psptr = ppd->cpspec->kpregbase + krp_sendctrl;
  4932. psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
  4933. if (psoffs == offs)
  4934. break;
  4935. }
  4936. /* If pport is not being managed by driver, just avoid shadows. */
  4937. if (pidx >= dd->num_pports)
  4938. ppd = NULL;
  4939. /* In any case, "idx" is flat index in kreg space */
  4940. idx = offs / sizeof(u64);
  4941. all_bits = ~0ULL;
  4942. if (only_32)
  4943. all_bits >>= 32;
  4944. spin_lock_irqsave(&dd->sendctrl_lock, flags);
  4945. if (!ppd || (mask & all_bits) != all_bits) {
  4946. /*
  4947. * At least some mask bits are zero, so we need
  4948. * to read. The judgement call is whether from
  4949. * reg or shadow. First-cut: read reg, and complain
  4950. * if any bits which should be shadowed are different
  4951. * from their shadowed value.
  4952. */
  4953. if (only_32)
  4954. local_data = (u64)qib_read_kreg32(dd, idx);
  4955. else
  4956. local_data = qib_read_kreg64(dd, idx);
  4957. *data = (local_data & ~mask) | (*data & mask);
  4958. }
  4959. if (mask) {
  4960. /*
  4961. * At least some mask bits are one, so we need
  4962. * to write, but only shadow some bits.
  4963. */
  4964. u64 sval, tval; /* Shadowed, transient */
  4965. /*
  4966. * New shadow val is bits we don't want to touch,
  4967. * ORed with bits we do, that are intended for shadow.
  4968. */
  4969. if (ppd) {
  4970. sval = ppd->p_sendctrl & ~mask;
  4971. sval |= *data & SENDCTRL_SHADOWED & mask;
  4972. ppd->p_sendctrl = sval;
  4973. } else
  4974. sval = *data & SENDCTRL_SHADOWED & mask;
  4975. tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
  4976. qib_write_kreg(dd, idx, tval);
4977. qib_write_kreg(dd, kr_scratch, 0ULL);
  4978. }
  4979. spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
  4980. return only_32 ? 4 : 8;
  4981. }
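/*
 * A self-contained sketch (not driver code) of the shadow/transient
 * split in sendctrl_hook() above: of the bits being written (mask),
 * only those in the shadowed set are remembered in software; the rest
 * reach the register transiently. SHADOWED_BITS is hypothetical.
 */
#if 0
#define SHADOWED_BITS 0x00f0ULL

static u64 split_write(u64 *shadow, u64 data, u64 mask)
{
	u64 sval, tval;

	sval = (*shadow & ~mask) | (data & SHADOWED_BITS & mask);
	*shadow = sval;				      /* keep shadowed bits */
	tval = sval | (data & ~SHADOWED_BITS & mask); /* add transient bits */
	return tval;				      /* value actually written */
}
#endif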
  4982. static const struct diag_observer sendctrl_0_observer = {
  4983. sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
  4984. KREG_IDX(SendCtrl_0) * sizeof(u64)
  4985. };
  4986. static const struct diag_observer sendctrl_1_observer = {
  4987. sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
  4988. KREG_IDX(SendCtrl_1) * sizeof(u64)
  4989. };
  4990. static ushort sdma_fetch_prio = 8;
  4991. module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
  4992. MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
  4993. /* Besides logging QSFP events, we set appropriate TxDDS values */
  4994. static void init_txdds_table(struct qib_pportdata *ppd, int override);
  4995. static void qsfp_7322_event(struct work_struct *work)
  4996. {
  4997. struct qib_qsfp_data *qd;
  4998. struct qib_pportdata *ppd;
  4999. unsigned long pwrup;
  5000. unsigned long flags;
  5001. int ret;
  5002. u32 le2;
  5003. qd = container_of(work, struct qib_qsfp_data, work);
  5004. ppd = qd->ppd;
  5005. pwrup = qd->t_insert +
  5006. msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5007. /* Delay for 20 msecs to allow ModPrs resistor to set up */
  5008. mdelay(QSFP_MODPRS_LAG_MSEC);
  5009. if (!qib_qsfp_mod_present(ppd)) {
  5010. ppd->cpspec->qsfp_data.modpresent = 0;
  5011. /* Set the physical link to disabled */
  5012. qib_set_ib_7322_lstate(ppd, 0,
  5013. QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
  5014. spin_lock_irqsave(&ppd->lflags_lock, flags);
  5015. ppd->lflags &= ~QIBL_LINKV;
  5016. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  5017. } else {
  5018. /*
5019. * Some QSFPs not only do not respond until the full power-up
5020. * time has elapsed, but may behave badly if probed earlier. So
5021. * hold off responding to insertion.
  5022. */
  5023. while (1) {
  5024. if (time_is_before_jiffies(pwrup))
  5025. break;
  5026. msleep(20);
  5027. }
  5028. ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
  5029. /*
  5030. * Need to change LE2 back to defaults if we couldn't
  5031. * read the cable type (to handle cable swaps), so do this
  5032. * even on failure to read cable information. We don't
  5033. * get here for QME, so IS_QME check not needed here.
  5034. */
  5035. if (!ret && !ppd->dd->cspec->r1) {
  5036. if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
  5037. le2 = LE2_QME;
  5038. else if (qd->cache.atten[1] >= qib_long_atten &&
  5039. QSFP_IS_CU(qd->cache.tech))
  5040. le2 = LE2_5m;
  5041. else
  5042. le2 = LE2_DEFAULT;
  5043. } else
  5044. le2 = LE2_DEFAULT;
  5045. ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
  5046. /*
5047. * We always change parameters, since we can choose
  5048. * values for cables without eeproms, and the cable may have
  5049. * changed from a cable with full or partial eeprom content
  5050. * to one with partial or no content.
  5051. */
  5052. init_txdds_table(ppd, 0);
  5053. /* The physical link is being re-enabled only when the
  5054. * previous state was DISABLED and the VALID bit is not
  5055. * set. This should only happen when the cable has been
  5056. * physically pulled. */
  5057. if (!ppd->cpspec->qsfp_data.modpresent &&
  5058. (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
  5059. ppd->cpspec->qsfp_data.modpresent = 1;
  5060. qib_set_ib_7322_lstate(ppd, 0,
  5061. QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
  5062. spin_lock_irqsave(&ppd->lflags_lock, flags);
  5063. ppd->lflags |= QIBL_LINKV;
  5064. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  5065. }
  5066. }
  5067. }
  5068. /*
  5069. * There is little we can do but complain to the user if QSFP
  5070. * initialization fails.
  5071. */
  5072. static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
  5073. {
  5074. unsigned long flags;
  5075. struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
  5076. struct qib_devdata *dd = ppd->dd;
  5077. u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
  5078. mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
  5079. qd->ppd = ppd;
  5080. qib_qsfp_init(qd, qsfp_7322_event);
  5081. spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
  5082. dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
  5083. dd->cspec->gpio_mask |= mod_prs_bit;
  5084. qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
  5085. qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
  5086. spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
  5087. }
  5088. /*
  5089. * called at device initialization time, and also if the txselect
  5090. * module parameter is changed. This is used for cables that don't
  5091. * have valid QSFP EEPROMs (not present, or attenuation is zero).
  5092. * We initialize to the default, then if there is a specific
  5093. * unit,port match, we use that (and set it immediately, for the
  5094. * current speed, if the link is at INIT or better).
5095. * String format is "default# unit#,port#=# ... u,p=#"; the separator
5096. * must be a SPACE character. A newline terminates. The u,p=# tuples may
5097. * optionally have "u,p=#,#", where the final # is the H1 value.
  5098. * The last specific match is used (actually, all are used, but last
  5099. * one is the one that winds up set); if none at all, fall back on default.
  5100. */
  5101. static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
  5102. {
  5103. char *nxt, *str;
  5104. u32 pidx, unit, port, deflt, h1;
  5105. unsigned long val;
  5106. int any = 0, seth1;
  5107. int txdds_size;
  5108. str = txselect_list;
  5109. /* default number is validated in setup_txselect() */
  5110. deflt = simple_strtoul(str, &nxt, 0);
  5111. for (pidx = 0; pidx < dd->num_pports; ++pidx)
  5112. dd->pport[pidx].cpspec->no_eep = deflt;
  5113. txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
  5114. if (IS_QME(dd) || IS_QMH(dd))
  5115. txdds_size += TXDDS_MFG_SZ;
  5116. while (*nxt && nxt[1]) {
  5117. str = ++nxt;
  5118. unit = simple_strtoul(str, &nxt, 0);
  5119. if (nxt == str || !*nxt || *nxt != ',') {
  5120. while (*nxt && *nxt++ != ' ') /* skip to next, if any */
  5121. ;
  5122. continue;
  5123. }
  5124. str = ++nxt;
  5125. port = simple_strtoul(str, &nxt, 0);
  5126. if (nxt == str || *nxt != '=') {
  5127. while (*nxt && *nxt++ != ' ') /* skip to next, if any */
  5128. ;
  5129. continue;
  5130. }
  5131. str = ++nxt;
  5132. val = simple_strtoul(str, &nxt, 0);
  5133. if (nxt == str) {
  5134. while (*nxt && *nxt++ != ' ') /* skip to next, if any */
  5135. ;
  5136. continue;
  5137. }
  5138. if (val >= txdds_size)
  5139. continue;
  5140. seth1 = 0;
5141. h1 = 0; /* gcc thinks it might be used uninitialized */
  5142. if (*nxt == ',' && nxt[1]) {
  5143. str = ++nxt;
  5144. h1 = (u32)simple_strtoul(str, &nxt, 0);
  5145. if (nxt == str)
  5146. while (*nxt && *nxt++ != ' ') /* skip */
  5147. ;
  5148. else
  5149. seth1 = 1;
  5150. }
  5151. for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
  5152. ++pidx) {
  5153. struct qib_pportdata *ppd = &dd->pport[pidx];
  5154. if (ppd->port != port || !ppd->link_speed_supported)
  5155. continue;
  5156. ppd->cpspec->no_eep = val;
  5157. if (seth1)
  5158. ppd->cpspec->h1_val = h1;
  5159. /* now change the IBC and serdes, overriding generic */
  5160. init_txdds_table(ppd, 1);
  5161. /* Re-enable the physical state machine on mezz boards
  5162. * now that the correct settings have been set.
5163. * QSFP boards are handled by the QSFP event handler. */
  5164. if (IS_QMH(dd) || IS_QME(dd))
  5165. qib_set_ib_7322_lstate(ppd, 0,
  5166. QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
  5167. any++;
  5168. }
  5169. if (*nxt == '\n')
  5170. break; /* done */
  5171. }
  5172. if (change && !any) {
  5173. /* no specific setting, use the default.
  5174. * Change the IBC and serdes, but since it's
  5175. * general, don't override specific settings.
  5176. */
  5177. for (pidx = 0; pidx < dd->num_pports; ++pidx)
  5178. if (dd->pport[pidx].link_speed_supported)
  5179. init_txdds_table(&dd->pport[pidx], 0);
  5180. }
  5181. }
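/*
 * A hypothetical txselect string in the format parsed above:
 * "2 1,1=5 1,2=6,10\n" sets the default table index to 2, then for
 * unit 1 port 1 selects index 5, and for unit 1 port 2 selects index 6
 * with an explicit H1 value of 10. Tuples for other units are skipped.
 */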
  5182. /* handle the txselect parameter changing */
  5183. static int setup_txselect(const char *str, struct kernel_param *kp)
  5184. {
  5185. struct qib_devdata *dd;
  5186. unsigned long val;
  5187. char *n;
  5188. if (strlen(str) >= MAX_ATTEN_LEN) {
  5189. printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
  5190. "too long\n");
  5191. return -ENOSPC;
  5192. }
  5193. val = simple_strtoul(str, &n, 0);
  5194. if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
  5195. TXDDS_MFG_SZ)) {
  5196. printk(KERN_INFO QIB_DRV_NAME
5197. " txselect_values must start with a number < %d\n",
  5198. TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
  5199. return -EINVAL;
  5200. }
  5201. strcpy(txselect_list, str);
  5202. list_for_each_entry(dd, &qib_dev_list, list)
  5203. if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
  5204. set_no_qsfp_atten(dd, 1);
  5205. return 0;
  5206. }
  5207. /*
  5208. * Write the final few registers that depend on some of the
  5209. * init setup. Done late in init, just before bringing up
  5210. * the serdes.
  5211. */
  5212. static int qib_late_7322_initreg(struct qib_devdata *dd)
  5213. {
  5214. int ret = 0, n;
  5215. u64 val;
  5216. qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
  5217. qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
  5218. qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
  5219. qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
  5220. val = qib_read_kreg64(dd, kr_sendpioavailaddr);
  5221. if (val != dd->pioavailregs_phys) {
  5222. qib_dev_err(dd, "Catastrophic software error, "
  5223. "SendPIOAvailAddr written as %lx, "
  5224. "read back as %llx\n",
  5225. (unsigned long) dd->pioavailregs_phys,
  5226. (unsigned long long) val);
  5227. ret = -EINVAL;
  5228. }
  5229. n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
  5230. qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
  5231. /* driver sends get pkey, lid, etc. checking also, to catch bugs */
  5232. qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
  5233. qib_register_observer(dd, &sendctrl_0_observer);
  5234. qib_register_observer(dd, &sendctrl_1_observer);
  5235. dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
  5236. qib_write_kreg(dd, kr_control, dd->control);
  5237. /*
  5238. * Set SendDmaFetchPriority and init Tx params, including
  5239. * QSFP handler on boards that have QSFP.
  5240. * First set our default attenuation entry for cables that
  5241. * don't have valid attenuation.
  5242. */
  5243. set_no_qsfp_atten(dd, 0);
  5244. for (n = 0; n < dd->num_pports; ++n) {
  5245. struct qib_pportdata *ppd = dd->pport + n;
  5246. qib_write_kreg_port(ppd, krp_senddmaprioritythld,
  5247. sdma_fetch_prio & 0xf);
  5248. /* Initialize qsfp if present on board. */
  5249. if (dd->flags & QIB_HAS_QSFP)
  5250. qib_init_7322_qsfp(ppd);
  5251. }
  5252. dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
  5253. qib_write_kreg(dd, kr_control, dd->control);
  5254. return ret;
  5255. }
  5256. /* per IB port errors. */
  5257. #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
  5258. MASK_ACROSS(8, 15))
  5259. #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
  5260. #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
  5261. MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
  5262. MASK_ACROSS(0, 11))
  5263. /*
  5264. * Write the initialization per-port registers that need to be done at
  5265. * driver load and after reset completes (i.e., that aren't done as part
  5266. * of other init procedures called from qib_init.c).
  5267. * Some of these should be redundant on reset, but play safe.
  5268. */
  5269. static void write_7322_init_portregs(struct qib_pportdata *ppd)
  5270. {
  5271. u64 val;
  5272. int i;
  5273. if (!ppd->link_speed_supported) {
  5274. /* no buffer credits for this port */
  5275. for (i = 1; i < 8; i++)
  5276. qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
  5277. qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
  5278. qib_write_kreg(ppd->dd, kr_scratch, 0);
  5279. return;
  5280. }
  5281. /*
  5282. * Set the number of supported virtual lanes in IBC,
  5283. * for flow control packet handling on unsupported VLs
  5284. */
  5285. val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
  5286. val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
  5287. val |= (u64)(ppd->vls_supported - 1) <<
  5288. SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
  5289. qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
  5290. qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
  5291. /* enable tx header checking */
  5292. qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
  5293. IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
  5294. IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
  5295. qib_write_kreg_port(ppd, krp_ncmodectrl,
  5296. SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
  5297. /*
  5298. * Unconditionally clear the bufmask bits. If SDMA is
  5299. * enabled, we'll set them appropriately later.
  5300. */
  5301. qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
  5302. qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
  5303. qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
  5304. if (ppd->dd->cspec->r1)
  5305. ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
  5306. }
  5307. /*
  5308. * Write the initialization per-device registers that need to be done at
  5309. * driver load and after reset completes (i.e., that aren't done as part
  5310. * of other init procedures called from qib_init.c). Also write per-port
  5311. * registers that are affected by overall device config, such as QP mapping
  5312. * Some of these should be redundant on reset, but play safe.
  5313. */
  5314. static void write_7322_initregs(struct qib_devdata *dd)
  5315. {
  5316. struct qib_pportdata *ppd;
  5317. int i, pidx;
  5318. u64 val;
  5319. /* Set Multicast QPs received by port 2 to map to context one. */
  5320. qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
  5321. for (pidx = 0; pidx < dd->num_pports; ++pidx) {
  5322. unsigned n, regno;
  5323. unsigned long flags;
  5324. if (dd->n_krcv_queues < 2 ||
  5325. !dd->pport[pidx].link_speed_supported)
  5326. continue;
  5327. ppd = &dd->pport[pidx];
  5328. /* be paranoid against later code motion, etc. */
  5329. spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
  5330. ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
  5331. spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
  5332. /* Initialize QP to context mapping */
  5333. regno = krp_rcvqpmaptable;
  5334. val = 0;
  5335. if (dd->num_pports > 1)
  5336. n = dd->first_user_ctxt / dd->num_pports;
  5337. else
  5338. n = dd->first_user_ctxt - 1;
  5339. for (i = 0; i < 32; ) {
  5340. unsigned ctxt;
  5341. if (dd->num_pports > 1)
  5342. ctxt = (i % n) * dd->num_pports + pidx;
  5343. else if (i % n)
  5344. ctxt = (i % n) + 1;
  5345. else
  5346. ctxt = ppd->hw_pidx;
  5347. val |= ctxt << (5 * (i % 6));
  5348. i++;
  5349. if (i % 6 == 0) {
  5350. qib_write_kreg_port(ppd, regno, val);
  5351. val = 0;
  5352. regno++;
  5353. }
  5354. }
  5355. qib_write_kreg_port(ppd, regno, val);
  5356. }
  5357. /*
5358. * Set up interrupt mitigation for kernel contexts, but
  5359. * not user contexts (user contexts use interrupts when
  5360. * stalled waiting for any packet, so want those interrupts
  5361. * right away).
  5362. */
  5363. for (i = 0; i < dd->first_user_ctxt; i++) {
  5364. dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
  5365. qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
  5366. }
  5367. /*
  5368. * Initialize as (disabled) rcvflow tables. Application code
5369. * will set up each flow as it uses the flow.
  5370. * Doesn't clear any of the error bits that might be set.
  5371. */
  5372. val = TIDFLOW_ERRBITS; /* these are W1C */
  5373. for (i = 0; i < dd->cfgctxts; i++) {
  5374. int flow;
  5375. for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
  5376. qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
  5377. }
  5378. /*
5379. * Dual-port cards init to dual-port recovery; single-port cards to
5380. * the one port. Dual-port cards may later adjust to 1 port,
5381. * and then back to dual port if both ports are connected.
5382. */
  5383. if (dd->num_pports)
  5384. setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
  5385. }
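/*
 * A self-contained sketch of the QP-to-context map packing above: each
 * krp_rcvqpmaptable register holds six 5-bit context numbers in its low
 * 30 bits, so the 32 map entries span six registers, the last holding
 * only two entries.
 */
#if 0
static void pack_qpmap(const unsigned ctxts[32], u64 regs[6])
{
	unsigned i, r = 0;
	u64 val = 0;

	for (i = 0; i < 32; ) {
		val |= (u64)(ctxts[i] & 0x1f) << (5 * (i % 6));
		i++;
		if (i % 6 == 0) {	/* register full: six entries */
			regs[r++] = val;
			val = 0;
		}
	}
	regs[r] = val;	/* final partial register (entries 30 and 31) */
}
#endif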
  5386. static int qib_init_7322_variables(struct qib_devdata *dd)
  5387. {
  5388. struct qib_pportdata *ppd;
  5389. unsigned features, pidx, sbufcnt;
  5390. int ret, mtu;
  5391. u32 sbufs, updthresh;
  5392. /* pport structs are contiguous, allocated after devdata */
  5393. ppd = (struct qib_pportdata *)(dd + 1);
  5394. dd->pport = ppd;
  5395. ppd[0].dd = dd;
  5396. ppd[1].dd = dd;
  5397. dd->cspec = (struct qib_chip_specific *)(ppd + 2);
  5398. ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
  5399. ppd[1].cpspec = &ppd[0].cpspec[1];
  5400. ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
  5401. ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
  5402. spin_lock_init(&dd->cspec->rcvmod_lock);
  5403. spin_lock_init(&dd->cspec->gpio_lock);
  5404. /* we haven't yet set QIB_PRESENT, so use read directly */
  5405. dd->revision = readq(&dd->kregbase[kr_revision]);
  5406. if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
  5407. qib_dev_err(dd, "Revision register read failure, "
  5408. "giving up initialization\n");
  5409. ret = -ENODEV;
  5410. goto bail;
  5411. }
  5412. dd->flags |= QIB_PRESENT; /* now register routines work */
  5413. dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
  5414. dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
  5415. dd->cspec->r1 = dd->minrev == 1;
  5416. get_7322_chip_params(dd);
  5417. features = qib_7322_boardname(dd);
5418. /* now that piobcnt2k and 4k are set, we can allocate these */
  5419. sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
  5420. NUM_VL15_BUFS + BITS_PER_LONG - 1;
  5421. sbufcnt /= BITS_PER_LONG;
  5422. dd->cspec->sendchkenable = kmalloc(sbufcnt *
  5423. sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
  5424. dd->cspec->sendgrhchk = kmalloc(sbufcnt *
  5425. sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
  5426. dd->cspec->sendibchk = kmalloc(sbufcnt *
  5427. sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
  5428. if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
  5429. !dd->cspec->sendibchk) {
  5430. qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
  5431. ret = -ENOMEM;
  5432. goto bail;
  5433. }
  5434. ppd = dd->pport;
  5435. /*
  5436. * GPIO bits for TWSI data and clock,
  5437. * used for serial EEPROM.
  5438. */
  5439. dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
  5440. dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
  5441. dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
  5442. dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
  5443. QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
  5444. QIB_HAS_THRESH_UPDATE |
  5445. (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
  5446. dd->flags |= qib_special_trigger ?
  5447. QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
  5448. /*
  5449. * Setup initial values. These may change when PAT is enabled, but
  5450. * we need these to do initial chip register accesses.
  5451. */
  5452. qib_7322_set_baseaddrs(dd);
  5453. mtu = ib_mtu_enum_to_int(qib_ibmtu);
  5454. if (mtu == -1)
  5455. mtu = QIB_DEFAULT_MTU;
  5456. dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
  5457. /* all hwerrors become interrupts, unless special purposed */
  5458. dd->cspec->hwerrmask = ~0ULL;
  5459. /* link_recovery setup causes these errors, so ignore them,
  5460. * other than clearing them when they occur */
  5461. dd->cspec->hwerrmask &=
  5462. ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
  5463. SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
  5464. HWE_MASK(LATriggered));
  5465. for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
  5466. struct qib_chippport_specific *cp = ppd->cpspec;
  5467. ppd->link_speed_supported = features & PORT_SPD_CAP;
  5468. features >>= PORT_SPD_CAP_SHIFT;
  5469. if (!ppd->link_speed_supported) {
  5470. /* single port mode (7340, or configured) */
  5471. dd->skip_kctxt_mask |= 1 << pidx;
  5472. if (pidx == 0) {
  5473. /* Make sure port is disabled. */
  5474. qib_write_kreg_port(ppd, krp_rcvctrl, 0);
  5475. qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
  5476. ppd[0] = ppd[1];
  5477. dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
  5478. IBSerdesPClkNotDetectMask_0)
  5479. | SYM_MASK(HwErrMask,
  5480. SDmaMemReadErrMask_0));
  5481. dd->cspec->int_enable_mask &= ~(
  5482. SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
  5483. SYM_MASK(IntMask, SDmaIdleIntMask_0) |
  5484. SYM_MASK(IntMask, SDmaProgressIntMask_0) |
  5485. SYM_MASK(IntMask, SDmaIntMask_0) |
  5486. SYM_MASK(IntMask, ErrIntMask_0) |
  5487. SYM_MASK(IntMask, SendDoneIntMask_0));
  5488. } else {
  5489. /* Make sure port is disabled. */
  5490. qib_write_kreg_port(ppd, krp_rcvctrl, 0);
  5491. qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
  5492. dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
  5493. IBSerdesPClkNotDetectMask_1)
  5494. | SYM_MASK(HwErrMask,
  5495. SDmaMemReadErrMask_1));
  5496. dd->cspec->int_enable_mask &= ~(
  5497. SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
  5498. SYM_MASK(IntMask, SDmaIdleIntMask_1) |
  5499. SYM_MASK(IntMask, SDmaProgressIntMask_1) |
  5500. SYM_MASK(IntMask, SDmaIntMask_1) |
  5501. SYM_MASK(IntMask, ErrIntMask_1) |
  5502. SYM_MASK(IntMask, SendDoneIntMask_1));
  5503. }
  5504. continue;
  5505. }
  5506. dd->num_pports++;
  5507. qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
  5508. ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
  5509. ppd->link_width_enabled = IB_WIDTH_4X;
  5510. ppd->link_speed_enabled = ppd->link_speed_supported;
  5511. /*
  5512. * Set the initial values to reasonable default, will be set
  5513. * for real when link is up.
  5514. */
  5515. ppd->link_width_active = IB_WIDTH_4X;
  5516. ppd->link_speed_active = QIB_IB_SDR;
  5517. ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
  5518. switch (qib_num_cfg_vls) {
  5519. case 1:
  5520. ppd->vls_supported = IB_VL_VL0;
  5521. break;
  5522. case 2:
  5523. ppd->vls_supported = IB_VL_VL0_1;
  5524. break;
  5525. default:
  5526. qib_devinfo(dd->pcidev,
  5527. "Invalid num_vls %u, using 4 VLs\n",
  5528. qib_num_cfg_vls);
  5529. qib_num_cfg_vls = 4;
  5530. /* fall through */
  5531. case 4:
  5532. ppd->vls_supported = IB_VL_VL0_3;
  5533. break;
  5534. case 8:
  5535. if (mtu <= 2048)
  5536. ppd->vls_supported = IB_VL_VL0_7;
  5537. else {
  5538. qib_devinfo(dd->pcidev,
  5539. "Invalid num_vls %u for MTU %d "
  5540. ", using 4 VLs\n",
  5541. qib_num_cfg_vls, mtu);
  5542. ppd->vls_supported = IB_VL_VL0_3;
  5543. qib_num_cfg_vls = 4;
  5544. }
  5545. break;
  5546. }
  5547. ppd->vls_operational = ppd->vls_supported;
  5548. init_waitqueue_head(&cp->autoneg_wait);
  5549. INIT_DELAYED_WORK(&cp->autoneg_work,
  5550. autoneg_7322_work);
  5551. if (ppd->dd->cspec->r1)
  5552. INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
  5553. /*
  5554. * For Mez and similar cards, no qsfp info, so do
  5555. * the "cable info" setup here. Can be overridden
  5556. * in adapter-specific routines.
  5557. */
  5558. if (!(dd->flags & QIB_HAS_QSFP)) {
  5559. if (!IS_QMH(dd) && !IS_QME(dd))
  5560. qib_devinfo(dd->pcidev, "IB%u:%u: "
  5561. "Unknown mezzanine card type\n",
  5562. dd->unit, ppd->port);
  5563. cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
  5564. /*
  5565. * Choose center value as default tx serdes setting
  5566. * until changed through module parameter.
  5567. */
  5568. ppd->cpspec->no_eep = IS_QMH(dd) ?
  5569. TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
  5570. } else
  5571. cp->h1_val = H1_FORCE_VAL;
  5572. /* Avoid writes to chip for mini_init */
  5573. if (!qib_mini_init)
  5574. write_7322_init_portregs(ppd);
  5575. init_timer(&cp->chase_timer);
  5576. cp->chase_timer.function = reenable_chase;
  5577. cp->chase_timer.data = (unsigned long)ppd;
  5578. ppd++;
  5579. }
  5580. dd->rcvhdrentsize = qib_rcvhdrentsize ?
  5581. qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
  5582. dd->rcvhdrsize = qib_rcvhdrsize ?
  5583. qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
  5584. dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
  5585. /* we always allocate at least 2048 bytes for eager buffers */
  5586. dd->rcvegrbufsize = max(mtu, 2048);
  5587. BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
  5588. dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
  5589. qib_7322_tidtemplate(dd);
  5590. /*
  5591. * We can request a receive interrupt for 1 or
  5592. * more packets from current offset.
  5593. */
  5594. dd->rhdrhead_intr_off =
  5595. (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
  5596. /* setup the stats timer; the add_timer is done at end of init */
  5597. init_timer(&dd->stats_timer);
  5598. dd->stats_timer.function = qib_get_7322_faststats;
  5599. dd->stats_timer.data = (unsigned long) dd;
  5600. dd->ureg_align = 0x10000; /* 64KB alignment */
  5601. dd->piosize2kmax_dwords = dd->piosize2k >> 2;
  5602. qib_7322_config_ctxts(dd);
  5603. qib_set_ctxtcnt(dd);
  5604. if (qib_wc_pat) {
  5605. resource_size_t vl15off;
  5606. /*
  5607. * We do not set WC on the VL15 buffers to avoid
  5608. * a rare problem with unaligned writes from
  5609. * interrupt-flushed store buffers, so we need
  5610. * to map those separately here. We can't solve
  5611. * this for the rarely used mtrr case.
  5612. */
  5613. ret = init_chip_wc_pat(dd, 0);
  5614. if (ret)
  5615. goto bail;
  5616. /* vl15 buffers start just after the 4k buffers */
  5617. vl15off = dd->physaddr + (dd->piobufbase >> 32) +
  5618. dd->piobcnt4k * dd->align4k;
  5619. dd->piovl15base = ioremap_nocache(vl15off,
  5620. NUM_VL15_BUFS * dd->align4k);
  5621. if (!dd->piovl15base)
  5622. goto bail;
  5623. }
  5624. qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
  5625. ret = 0;
  5626. if (qib_mini_init)
  5627. goto bail;
  5628. if (!dd->num_pports) {
  5629. qib_dev_err(dd, "No ports enabled, giving up initialization\n");
  5630. goto bail; /* no error, so can still figure out why err */
  5631. }
  5632. write_7322_initregs(dd);
  5633. ret = qib_create_ctxts(dd);
  5634. init_7322_cntrnames(dd);
  5635. updthresh = 8U; /* update threshold */
5636. /* Use all of the 4KB buffers for kernel SDMA, zero if !SDMA.
5637. * Reserve the update-threshold amount (or 3, whichever is greater)
5638. * for other kernel use, such as sending SMI, MAD, and ACKs,
5639. * unless we aren't enabling SDMA, in which case we want to use
5640. * all the 4k bufs for the kernel.
5641. * If the reserve were less than the update threshold, we could wait
5642. * a long time for an update. Coded this way because we
5643. * sometimes change the update threshold for various reasons,
5644. * and we want this to remain robust.
5645. */
  5646. if (dd->flags & QIB_HAS_SEND_DMA) {
  5647. dd->cspec->sdmabufcnt = dd->piobcnt4k;
  5648. sbufs = updthresh > 3 ? updthresh : 3;
  5649. } else {
  5650. dd->cspec->sdmabufcnt = 0;
  5651. sbufs = dd->piobcnt4k;
  5652. }
  5653. dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
  5654. dd->cspec->sdmabufcnt;
  5655. dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
  5656. dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
  5657. dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
  5658. dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
  5659. /*
  5660. * If we have 16 user contexts, we will have 7 sbufs
  5661. * per context, so reduce the update threshold to match. We
  5662. * want to update before we actually run out, at low pbufs/ctxt
  5663. * so give ourselves some margin.
  5664. */
  5665. if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
  5666. updthresh = dd->pbufsctxt - 2;
  5667. dd->cspec->updthresh_dflt = updthresh;
  5668. dd->cspec->updthresh = updthresh;
  5669. /* before full enable, no interrupts, no locking needed */
  5670. dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
  5671. << SYM_LSB(SendCtrl, AvailUpdThld)) |
  5672. SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
  5673. dd->psxmitwait_supported = 1;
  5674. dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
  5675. bail:
  5676. if (!dd->ctxtcnt)
  5677. dd->ctxtcnt = 1; /* for other initialization code */
  5678. return ret;
  5679. }
  5680. static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
  5681. u32 *pbufnum)
  5682. {
  5683. u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
  5684. struct qib_devdata *dd = ppd->dd;
  5685. /* last is same for 2k and 4k, because we use 4k if all 2k busy */
  5686. if (pbc & PBC_7322_VL15_SEND) {
  5687. first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
  5688. last = first;
  5689. } else {
  5690. if ((plen + 1) > dd->piosize2kmax_dwords)
  5691. first = dd->piobcnt2k;
  5692. else
  5693. first = 0;
  5694. last = dd->cspec->lastbuf_for_pio;
  5695. }
  5696. return qib_getsendbuf_range(dd, pbufnum, first, last);
  5697. }
  5698. static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
  5699. u32 start)
  5700. {
  5701. qib_write_kreg_port(ppd, krp_psinterval, intv);
  5702. qib_write_kreg_port(ppd, krp_psstart, start);
  5703. }
  5704. /*
  5705. * Must be called with sdma_lock held, or before init finished.
  5706. */
  5707. static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
  5708. {
  5709. qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
  5710. }
  5711. static struct sdma_set_state_action sdma_7322_action_table[] = {
  5712. [qib_sdma_state_s00_hw_down] = {
  5713. .go_s99_running_tofalse = 1,
  5714. .op_enable = 0,
  5715. .op_intenable = 0,
  5716. .op_halt = 0,
  5717. .op_drain = 0,
  5718. },
  5719. [qib_sdma_state_s10_hw_start_up_wait] = {
  5720. .op_enable = 0,
  5721. .op_intenable = 1,
  5722. .op_halt = 1,
  5723. .op_drain = 0,
  5724. },
  5725. [qib_sdma_state_s20_idle] = {
  5726. .op_enable = 1,
  5727. .op_intenable = 1,
  5728. .op_halt = 1,
  5729. .op_drain = 0,
  5730. },
  5731. [qib_sdma_state_s30_sw_clean_up_wait] = {
  5732. .op_enable = 0,
  5733. .op_intenable = 1,
  5734. .op_halt = 1,
  5735. .op_drain = 0,
  5736. },
  5737. [qib_sdma_state_s40_hw_clean_up_wait] = {
  5738. .op_enable = 1,
  5739. .op_intenable = 1,
  5740. .op_halt = 1,
  5741. .op_drain = 0,
  5742. },
  5743. [qib_sdma_state_s50_hw_halt_wait] = {
  5744. .op_enable = 1,
  5745. .op_intenable = 1,
  5746. .op_halt = 1,
  5747. .op_drain = 1,
  5748. },
  5749. [qib_sdma_state_s99_running] = {
  5750. .op_enable = 1,
  5751. .op_intenable = 1,
  5752. .op_halt = 0,
  5753. .op_drain = 0,
  5754. .go_s99_running_totrue = 1,
  5755. },
  5756. };
  5757. static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
  5758. {
  5759. ppd->sdma_state.set_state_action = sdma_7322_action_table;
  5760. }
  5761. static int init_sdma_7322_regs(struct qib_pportdata *ppd)
  5762. {
  5763. struct qib_devdata *dd = ppd->dd;
  5764. unsigned lastbuf, erstbuf;
  5765. u64 senddmabufmask[3] = { 0 };
  5766. int n, ret = 0;
  5767. qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
  5768. qib_sdma_7322_setlengen(ppd);
  5769. qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
  5770. qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
  5771. qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
  5772. qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
  5773. if (dd->num_pports)
  5774. n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
  5775. else
  5776. n = dd->cspec->sdmabufcnt; /* failsafe for init */
  5777. erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
  5778. ((dd->num_pports == 1 || ppd->port == 2) ? n :
  5779. dd->cspec->sdmabufcnt);
  5780. lastbuf = erstbuf + n;
  5781. ppd->sdma_state.first_sendbuf = erstbuf;
  5782. ppd->sdma_state.last_sendbuf = lastbuf;
  5783. for (; erstbuf < lastbuf; ++erstbuf) {
  5784. unsigned word = erstbuf / BITS_PER_LONG;
  5785. unsigned bit = erstbuf & (BITS_PER_LONG - 1);
  5786. BUG_ON(word >= 3);
  5787. senddmabufmask[word] |= 1ULL << bit;
  5788. }
  5789. qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
  5790. qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
  5791. qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
  5792. return ret;
  5793. }
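/*
 * Worked example for the SDMA buffer split above (hypothetical counts):
 * with piobcnt2k + piobcnt4k = 160, sdmabufcnt = 32 and two ports,
 * n = 16, so port 1 claims buffers 128..143 (erstbuf = 160 - 32) and
 * port 2 claims buffers 144..159 (erstbuf = 160 - 16); the bits for
 * each range are then set in the three senddmabufmask registers.
 */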
  5794. /* sdma_lock must be held */
  5795. static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
  5796. {
  5797. struct qib_devdata *dd = ppd->dd;
  5798. int sane;
  5799. int use_dmahead;
  5800. u16 swhead;
  5801. u16 swtail;
  5802. u16 cnt;
  5803. u16 hwhead;
  5804. use_dmahead = __qib_sdma_running(ppd) &&
  5805. (dd->flags & QIB_HAS_SDMA_TIMEOUT);
  5806. retry:
  5807. hwhead = use_dmahead ?
  5808. (u16) le64_to_cpu(*ppd->sdma_head_dma) :
  5809. (u16) qib_read_kreg_port(ppd, krp_senddmahead);
  5810. swhead = ppd->sdma_descq_head;
  5811. swtail = ppd->sdma_descq_tail;
  5812. cnt = ppd->sdma_descq_cnt;
  5813. if (swhead < swtail)
  5814. /* not wrapped */
5815. sane = (hwhead >= swhead) && (hwhead <= swtail);
  5816. else if (swhead > swtail)
  5817. /* wrapped around */
  5818. sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
  5819. (hwhead <= swtail);
  5820. else
  5821. /* empty */
  5822. sane = (hwhead == swhead);
  5823. if (unlikely(!sane)) {
  5824. if (use_dmahead) {
  5825. /* try one more time, directly from the register */
  5826. use_dmahead = 0;
  5827. goto retry;
  5828. }
  5829. /* proceed as if no progress */
  5830. hwhead = swhead;
  5831. }
  5832. return hwhead;
  5833. }
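/*
 * A self-contained sketch (not driver code) of the head-sanity predicate
 * above: for a ring of cnt descriptors, a hardware head is sane only if
 * it lies in the currently outstanding region, which may wrap around the
 * end of the ring.
 */
#if 0
static int hwhead_sane(u16 hwhead, u16 swhead, u16 swtail, u16 cnt)
{
	if (swhead < swtail)		/* not wrapped */
		return hwhead >= swhead && hwhead <= swtail;
	if (swhead > swtail)		/* wrapped around */
		return (hwhead >= swhead && hwhead < cnt) ||
			hwhead <= swtail;
	return hwhead == swhead;	/* empty ring */
}
#endif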
  5834. static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
  5835. {
  5836. u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
  5837. return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
  5838. (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
  5839. !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
  5840. !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
  5841. }
  5842. /*
  5843. * Compute the amount of delay before sending the next packet if the
  5844. * port's send rate differs from the static rate set for the QP.
  5845. * The delay affects the next packet and the amount of the delay is
5846. * based on the length of this packet.
  5847. */
  5848. static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
  5849. u8 srate, u8 vl)
  5850. {
  5851. u8 snd_mult = ppd->delay_mult;
  5852. u8 rcv_mult = ib_rate_to_delay[srate];
  5853. u32 ret;
  5854. ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
  5855. /* Indicate VL15, else set the VL in the control word */
  5856. if (vl == 15)
  5857. ret |= PBC_7322_VL15_SEND_CTRL;
  5858. else
  5859. ret |= vl << PBC_VL_NUM_LSB;
  5860. ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
  5861. return ret;
  5862. }
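/*
 * Worked example for the rate-delay math above (hypothetical values):
 * if the QP's static rate is slower than the port's rate, rcv_mult >
 * snd_mult and a delay proportional to packet length is inserted; for
 * plen = 63 dwords that is ((63 + 1) >> 1) * snd_mult = 32 * snd_mult.
 * If the static rate is at least as fast as the port, the delay is 0.
 */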
  5863. /*
  5864. * Enable the per-port VL15 send buffers for use.
  5865. * They follow the rest of the buffers, without a config parameter.
  5866. * This was in initregs, but that is done before the shadow
  5867. * is set up, and this has to be done after the shadow is
  5868. * set up.
  5869. */
  5870. static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
  5871. {
  5872. unsigned vl15bufs;
  5873. vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
  5874. qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
  5875. TXCHK_CHG_TYPE_KERN, NULL);
  5876. }
  5877. static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
  5878. {
  5879. if (rcd->ctxt < NUM_IB_PORTS) {
  5880. if (rcd->dd->num_pports > 1) {
  5881. rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
  5882. rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
  5883. } else {
  5884. rcd->rcvegrcnt = KCTXT0_EGRCNT;
  5885. rcd->rcvegr_tid_base = 0;
  5886. }
  5887. } else {
  5888. rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
  5889. rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
  5890. (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
  5891. }
  5892. }
  5893. #define QTXSLEEPS 5000
  5894. static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
  5895. u32 len, u32 which, struct qib_ctxtdata *rcd)
  5896. {
  5897. int i;
  5898. const int last = start + len - 1;
  5899. const int lastr = last / BITS_PER_LONG;
  5900. u32 sleeps = 0;
  5901. int wait = rcd != NULL;
  5902. unsigned long flags;
  5903. while (wait) {
  5904. unsigned long shadow;
  5905. int cstart, previ = -1;
  5906. /*
  5907. * when flipping from kernel to user, we can't change
  5908. * the checking type if the buffer is allocated to the
5909. * driver. It's OK in the other direction, because it's
  5910. * from close, and we have just disarm'ed all the
  5911. * buffers. All the kernel to kernel changes are also
  5912. * OK.
  5913. */
  5914. for (cstart = start; cstart <= last; cstart++) {
  5915. i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
  5916. / BITS_PER_LONG;
  5917. if (i != previ) {
  5918. shadow = (unsigned long)
  5919. le64_to_cpu(dd->pioavailregs_dma[i]);
  5920. previ = i;
  5921. }
  5922. if (test_bit(((2 * cstart) +
  5923. QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
  5924. % BITS_PER_LONG, &shadow))
  5925. break;
  5926. }
  5927. if (cstart > last)
  5928. break;
  5929. if (sleeps == QTXSLEEPS)
  5930. break;
  5931. /* make sure we see an updated copy next time around */
  5932. sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
  5933. sleeps++;
  5934. msleep(20);
  5935. }
  5936. switch (which) {
  5937. case TXCHK_CHG_TYPE_DIS1:
  5938. /*
  5939. * disable checking on a range; used by diags; just
  5940. * one buffer, but still written generically
  5941. */
  5942. for (i = start; i <= last; i++)
  5943. clear_bit(i, dd->cspec->sendchkenable);
  5944. break;
  5945. case TXCHK_CHG_TYPE_ENAB1:
  5946. /*
  5947. * (re)enable checking on a range; used by diags; just
  5948. * one buffer, but still written generically; read
  5949. * scratch to be sure buffer actually triggered, not
  5950. * just flushed from processor.
  5951. */
  5952. qib_read_kreg32(dd, kr_scratch);
  5953. for (i = start; i <= last; i++)
  5954. set_bit(i, dd->cspec->sendchkenable);
  5955. break;
  5956. case TXCHK_CHG_TYPE_KERN:
  5957. /* usable by kernel */
  5958. for (i = start; i <= last; i++) {
  5959. set_bit(i, dd->cspec->sendibchk);
  5960. clear_bit(i, dd->cspec->sendgrhchk);
  5961. }
  5962. spin_lock_irqsave(&dd->uctxt_lock, flags);
  5963. /* see if we need to raise avail update threshold */
  5964. for (i = dd->first_user_ctxt;
  5965. dd->cspec->updthresh != dd->cspec->updthresh_dflt
  5966. && i < dd->cfgctxts; i++)
  5967. if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
  5968. ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
  5969. < dd->cspec->updthresh_dflt)
  5970. break;
  5971. spin_unlock_irqrestore(&dd->uctxt_lock, flags);
  5972. if (i == dd->cfgctxts) {
  5973. spin_lock_irqsave(&dd->sendctrl_lock, flags);
  5974. dd->cspec->updthresh = dd->cspec->updthresh_dflt;
  5975. dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
  5976. dd->sendctrl |= (dd->cspec->updthresh &
  5977. SYM_RMASK(SendCtrl, AvailUpdThld)) <<
  5978. SYM_LSB(SendCtrl, AvailUpdThld);
  5979. spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
  5980. sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
  5981. }
  5982. break;
  5983. case TXCHK_CHG_TYPE_USER:
  5984. /* for user process */
  5985. for (i = start; i <= last; i++) {
  5986. clear_bit(i, dd->cspec->sendibchk);
  5987. set_bit(i, dd->cspec->sendgrhchk);
  5988. }
  5989. spin_lock_irqsave(&dd->sendctrl_lock, flags);
  5990. if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
  5991. / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
  5992. dd->cspec->updthresh = (rcd->piocnt /
  5993. rcd->subctxt_cnt) - 1;
  5994. dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
  5995. dd->sendctrl |= (dd->cspec->updthresh &
  5996. SYM_RMASK(SendCtrl, AvailUpdThld))
  5997. << SYM_LSB(SendCtrl, AvailUpdThld);
  5998. spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
  5999. sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
  6000. } else
  6001. spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
  6002. break;
  6003. default:
  6004. break;
  6005. }
  6006. for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
  6007. qib_write_kreg(dd, kr_sendcheckmask + i,
  6008. dd->cspec->sendchkenable[i]);
  6009. for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
  6010. qib_write_kreg(dd, kr_sendgrhcheckmask + i,
  6011. dd->cspec->sendgrhchk[i]);
  6012. qib_write_kreg(dd, kr_sendibpktmask + i,
  6013. dd->cspec->sendibchk[i]);
  6014. }
  6015. /*
  6016. * Be sure whatever we did was seen by the chip and acted upon,
  6017. * before we return. Mostly important for which >= 2.
  6018. */
  6019. qib_read_kreg32(dd, kr_scratch);
  6020. }
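/*
 * Worked example for the busy-bit lookup in the wait loop above: the
 * pioavail shadow keeps 2 bits per buffer, so buffer n's busy bit is
 * flat bit (2 * n + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT). Assuming a
 * shift of 1 and BITS_PER_LONG = 64, buffer 40 maps to flat bit 81:
 * word 81 / 64 = 1, bit 81 % 64 = 17 within that word.
 */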
  6021. /* useful for trigger analyzers, etc. */
  6022. static void writescratch(struct qib_devdata *dd, u32 val)
  6023. {
  6024. qib_write_kreg(dd, kr_scratch, val);
  6025. }
  6026. /* Dummy for now, use chip regs soon */
  6027. static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
  6028. {
  6029. return -ENXIO;
  6030. }
  6031. /**
  6032. * qib_init_iba7322_funcs - set up the chip-specific function pointers
6033. * @pdev: the pci_dev for the qlogic_ib device
  6034. * @ent: pci_device_id struct for this dev
  6035. *
  6036. * Also allocates, inits, and returns the devdata struct for this
  6037. * device instance
  6038. *
  6039. * This is global, and is called directly at init to set up the
  6040. * chip-specific function pointers for later use.
  6041. */
  6042. struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
  6043. const struct pci_device_id *ent)
  6044. {
  6045. struct qib_devdata *dd;
  6046. int ret, i;
  6047. u32 tabsize, actual_cnt = 0;
  6048. dd = qib_alloc_devdata(pdev,
  6049. NUM_IB_PORTS * sizeof(struct qib_pportdata) +
  6050. sizeof(struct qib_chip_specific) +
  6051. NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
  6052. if (IS_ERR(dd))
  6053. goto bail;
  6054. dd->f_bringup_serdes = qib_7322_bringup_serdes;
  6055. dd->f_cleanup = qib_setup_7322_cleanup;
  6056. dd->f_clear_tids = qib_7322_clear_tids;
  6057. dd->f_free_irq = qib_7322_free_irq;
  6058. dd->f_get_base_info = qib_7322_get_base_info;
  6059. dd->f_get_msgheader = qib_7322_get_msgheader;
  6060. dd->f_getsendbuf = qib_7322_getsendbuf;
  6061. dd->f_gpio_mod = gpio_7322_mod;
  6062. dd->f_eeprom_wen = qib_7322_eeprom_wen;
  6063. dd->f_hdrqempty = qib_7322_hdrqempty;
  6064. dd->f_ib_updown = qib_7322_ib_updown;
  6065. dd->f_init_ctxt = qib_7322_init_ctxt;
  6066. dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
  6067. dd->f_intr_fallback = qib_7322_intr_fallback;
  6068. dd->f_late_initreg = qib_late_7322_initreg;
  6069. dd->f_setpbc_control = qib_7322_setpbc_control;
  6070. dd->f_portcntr = qib_portcntr_7322;
  6071. dd->f_put_tid = qib_7322_put_tid;
  6072. dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
  6073. dd->f_rcvctrl = rcvctrl_7322_mod;
  6074. dd->f_read_cntrs = qib_read_7322cntrs;
  6075. dd->f_read_portcntrs = qib_read_7322portcntrs;
  6076. dd->f_reset = qib_do_7322_reset;
  6077. dd->f_init_sdma_regs = init_sdma_7322_regs;
  6078. dd->f_sdma_busy = qib_sdma_7322_busy;
  6079. dd->f_sdma_gethead = qib_sdma_7322_gethead;
  6080. dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
  6081. dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
  6082. dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
  6083. dd->f_sendctrl = sendctrl_7322_mod;
  6084. dd->f_set_armlaunch = qib_set_7322_armlaunch;
  6085. dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
  6086. dd->f_iblink_state = qib_7322_iblink_state;
  6087. dd->f_ibphys_portstate = qib_7322_phys_portstate;
  6088. dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
  6089. dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
  6090. dd->f_set_ib_loopback = qib_7322_set_loopback;
  6091. dd->f_get_ib_table = qib_7322_get_ib_table;
  6092. dd->f_set_ib_table = qib_7322_set_ib_table;
  6093. dd->f_set_intr_state = qib_7322_set_intr_state;
  6094. dd->f_setextled = qib_setup_7322_setextled;
  6095. dd->f_txchk_change = qib_7322_txchk_change;
  6096. dd->f_update_usrhead = qib_update_7322_usrhead;
  6097. dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
  6098. dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
  6099. dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
  6100. dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
  6101. dd->f_sdma_init_early = qib_7322_sdma_init_early;
  6102. dd->f_writescratch = writescratch;
  6103. dd->f_tempsense_rd = qib_7322_tempsense_rd;
  6104. /*
  6105. * Do remaining PCIe setup and save PCIe values in dd.
  6106. * Any error printing is already done by the init code.
  6107. * On return, we have the chip mapped, but chip registers
  6108. * are not set up until start of qib_init_7322_variables.
  6109. */
  6110. ret = qib_pcie_ddinit(dd, pdev, ent);
  6111. if (ret < 0)
  6112. goto bail_free;
  6113. /* initialize chip-specific variables */
  6114. ret = qib_init_7322_variables(dd);
  6115. if (ret)
  6116. goto bail_cleanup;
  6117. if (qib_mini_init || !dd->num_pports)
  6118. goto bail;
  6119. /*
  6120. * Determine number of vectors we want; depends on port count
  6121. * and number of configured kernel receive queues actually used.
  6122. * Should also depend on whether sdma is enabled or not, but
  6123. * that's such a rare testing case it's not worth worrying about.
  6124. */
  6125. tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
  6126. for (i = 0; i < tabsize; i++)
  6127. if ((i < ARRAY_SIZE(irq_table) &&
  6128. irq_table[i].port <= dd->num_pports) ||
  6129. (i >= ARRAY_SIZE(irq_table) &&
  6130. dd->rcd[i - ARRAY_SIZE(irq_table)]))
  6131. actual_cnt++;
6132. /* reduce by ctxts < 2 */
  6133. if (qib_krcvq01_no_msi)
  6134. actual_cnt -= dd->num_pports;
  6135. tabsize = actual_cnt;
  6136. dd->cspec->msix_entries = kmalloc(tabsize *
  6137. sizeof(struct msix_entry), GFP_KERNEL);
  6138. dd->cspec->msix_arg = kmalloc(tabsize *
  6139. sizeof(void *), GFP_KERNEL);
  6140. if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
  6141. qib_dev_err(dd, "No memory for MSIx table\n");
  6142. tabsize = 0;
  6143. }
  6144. for (i = 0; i < tabsize; i++)
  6145. dd->cspec->msix_entries[i].entry = i;
  6146. if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
  6147. qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
  6148. "continuing anyway\n");
  6149. /* may be less than we wanted, if not enough available */
  6150. dd->cspec->num_msix_entries = tabsize;
  6151. /* setup interrupt handler */
  6152. qib_setup_7322_interrupt(dd, 1);
  6153. /* clear diagctrl register, in case diags were running and crashed */
  6154. qib_write_kreg(dd, kr_hwdiagctrl, 0);
  6155. goto bail;
  6156. bail_cleanup:
  6157. qib_pcie_ddcleanup(dd);
  6158. bail_free:
  6159. qib_free_devdata(dd);
  6160. dd = ERR_PTR(ret);
  6161. bail:
  6162. return dd;
  6163. }
  6164. /*
6165. * Set the table entry at the specified index from the specified table.
  6166. * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
  6167. * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
  6168. * 'idx' below addresses the correct entry, while its 4 LSBs select the
  6169. * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
  6170. */
  6171. #define DDS_ENT_AMP_LSB 14
  6172. #define DDS_ENT_MAIN_LSB 9
  6173. #define DDS_ENT_POST_LSB 5
  6174. #define DDS_ENT_PRE_XTRA_LSB 3
  6175. #define DDS_ENT_PRE_LSB 0
  6176. /*
  6177. * Set one entry in the TxDDS table for spec'd port
  6178. * ridx picks one of the entries, while tp points
  6179. * to the appropriate table entry.
  6180. */
  6181. static void set_txdds(struct qib_pportdata *ppd, int ridx,
  6182. const struct txdds_ent *tp)
  6183. {
  6184. struct qib_devdata *dd = ppd->dd;
  6185. u32 pack_ent;
  6186. int regidx;
  6187. /* Get correct offset in chip-space, and in source table */
  6188. regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
  6189. /*
  6190. * We do not use qib_write_kreg_port() because it was intended
  6191. * only for registers in the lower "port specific" pages.
  6192. * So do index calculation by hand.
  6193. */
  6194. if (ppd->hw_pidx)
  6195. regidx += (dd->palign / sizeof(u64));
  6196. pack_ent = tp->amp << DDS_ENT_AMP_LSB;
  6197. pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
  6198. pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
  6199. pack_ent |= tp->post << DDS_ENT_POST_LSB;
  6200. qib_write_kreg(dd, regidx, pack_ent);
  6201. /* Prevent back-to-back writes by hitting scratch */
  6202. qib_write_kreg(ppd->dd, kr_scratch, 0);
  6203. }
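
/*
 * Worked example, derived directly from the shifts above: the loopback
 * table entry { amp = 2, pre = 2, main = 15, post = 6 } packs as
 * (2 << 14) | (15 << 9) | (6 << 5) | (2 << 0) == 0x9ec2 before being
 * written to the per-port DDS map register.
 */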

static const struct vendor_txdds_ent vendor_txdds[] = {
	{ /* Amphenol 1m 30awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470002       ",
		{ 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
	},
	{ /* Amphenol 3m 28awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470004       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
	},
	{ /* Finisar 3m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
		{ 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
	},
	{ /* Finisar 30m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
		{ 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
	},
	{ /* Finisar Default OM2 Optical */
		{ 0x00, 0x90, 0x65 }, NULL,
		{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
	},
	{ /* Gore 1m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
	},
	{ /* Gore 2m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
	},
	{ /* Gore 1m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
	},
	{ /* Gore 3m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
		{ 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
	},
	{ /* Gore 5m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
		{ 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
	},
	{ /* Gore 7m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
		{ 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
	},
	{ /* Gore 5m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
	},
	{ /* Gore 7m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
	},
	{ /* Intersil 12m 24awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
		{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
	},
	{ /* Intersil 10m 28awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
	},
	{ /* Intersil 7m 30awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
	},
	{ /* Intersil 5m 32awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
	},
	{ /* Intersil Default Active */
		{ 0x00, 0x30, 0xB4 }, NULL,
		{ 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
	},
	{ /* Luxtera 20m Active Optical */
		{ 0x00, 0x25, 0x63 }, NULL,
		{ 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
	},
	{ /* Molex 1M Cu loopback */
		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
		{ 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
	},
	{ /* Molex 2m 28awg NoEq */
		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
	},
};

static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 0, 0, 1 },		/* 2 dB */
	{ 0, 0, 0, 2 },		/* 3 dB */
	{ 0, 0, 0, 3 },		/* 4 dB */
	{ 0, 0, 0, 4 },		/* 5 dB */
	{ 0, 0, 0, 5 },		/* 6 dB */
	{ 0, 0, 0, 6 },		/* 7 dB */
	{ 0, 0, 0, 7 },		/* 8 dB */
	{ 0, 0, 0, 8 },		/* 9 dB */
	{ 0, 0, 0, 9 },		/* 10 dB */
	{ 0, 0, 0, 10 },	/* 11 dB */
	{ 0, 0, 0, 11 },	/* 12 dB */
	{ 0, 0, 0, 12 },	/* 13 dB */
	{ 0, 0, 0, 13 },	/* 14 dB */
	{ 0, 0, 0, 14 },	/* 15 dB */
	{ 0, 0, 0, 15 },	/* 16 dB */
};

static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 0, 0, 8 },		/* 2 dB */
	{ 0, 0, 0, 8 },		/* 3 dB */
	{ 0, 0, 0, 9 },		/* 4 dB */
	{ 0, 0, 0, 9 },		/* 5 dB */
	{ 0, 0, 0, 10 },	/* 6 dB */
	{ 0, 0, 0, 10 },	/* 7 dB */
	{ 0, 0, 0, 11 },	/* 8 dB */
	{ 0, 0, 0, 11 },	/* 9 dB */
	{ 0, 0, 0, 12 },	/* 10 dB */
	{ 0, 0, 0, 12 },	/* 11 dB */
	{ 0, 0, 0, 13 },	/* 12 dB */
	{ 0, 0, 0, 13 },	/* 13 dB */
	{ 0, 0, 0, 14 },	/* 14 dB */
	{ 0, 0, 0, 14 },	/* 15 dB */
	{ 0, 0, 0, 15 },	/* 16 dB */
};

static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 1, 0, 7 },		/* 2 dB (also QMH7342) */
	{ 0, 1, 0, 9 },		/* 3 dB (also QMH7342) */
	{ 0, 1, 0, 11 },	/* 4 dB */
	{ 0, 1, 0, 13 },	/* 5 dB */
	{ 0, 1, 0, 15 },	/* 6 dB */
	{ 0, 1, 3, 15 },	/* 7 dB */
	{ 0, 1, 7, 15 },	/* 8 dB */
	{ 0, 1, 7, 15 },	/* 9 dB */
	{ 0, 1, 8, 15 },	/* 10 dB */
	{ 0, 1, 9, 15 },	/* 11 dB */
	{ 0, 1, 10, 15 },	/* 12 dB */
	{ 0, 2, 6, 15 },	/* 13 dB */
	{ 0, 2, 7, 15 },	/* 14 dB */
	{ 0, 2, 8, 15 },	/* 15 dB */
	{ 0, 2, 9, 15 },	/* 16 dB */
};

/*
 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
 * These are mostly used for mez cards going through connectors
 * and backplane traces, but can be used to add other "unusual"
 * table values as well.
 */
static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 1 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 1 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 2 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 2 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 3 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 4 },		/* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 7 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 7 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 9 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 10 },	/* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 1, 0, 4 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 5 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 6 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 1, 12, 10 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 11 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 12 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 14 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 6 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 7 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 8 },	/* QME7342 backplane setting */
	{ 0, 1, 0, 10 },	/* QMH7342 backplane settings */
	{ 0, 1, 0, 12 },	/* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 0 },		/* QME7342 mfg settings */
	{ 0, 0, 0, 6 },		/* QME7342 P2 mfg settings */
};
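
/*
 * Summary of the txselect index space consumed by find_best_ent() below:
 * indices 0..TXDDS_TABLE_SZ-1 select from the main SDR/DDR/QDR tables,
 * TXDDS_TABLE_SZ..TXDDS_TABLE_SZ+TXDDS_EXTRA_SZ-1 select from the "extra"
 * tables, and the final TXDDS_MFG_SZ entries (honored only on QME/QMH
 * boards) select from txdds_extra_mfg.
 */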

static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
					       unsigned atten)
{
	/*
	 * The attenuation table starts at 2dB for entry 1,
	 * with entry 0 being the loopback entry.
	 */
	if (atten <= 2)
		atten = 1;
	else if (atten > TXDDS_TABLE_SZ)
		atten = TXDDS_TABLE_SZ - 1;
	else
		atten--;
	return txdds + atten;
}
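
/*
 * Example of the mapping above: a cable reporting 11 dB of attenuation
 * yields index 10, i.e. the "11 dB" entry of the table passed in; values
 * of 2 dB or less all map to entry 1, and anything beyond the table is
 * clamped to its last entry.
 */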

/*
 * if override is set, the module parameter txselect has a value
 * for this specific port, so use it, rather than our normal mechanism.
 */
static void find_best_ent(struct qib_pportdata *ppd,
			  const struct txdds_ent **sdr_dds,
			  const struct txdds_ent **ddr_dds,
			  const struct txdds_ent **qdr_dds, int override)
{
	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
	int idx;

	/* Search table of known cables */
	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
		const struct vendor_txdds_ent *v = vendor_txdds + idx;

		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
		    (!v->partnum ||
		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
			*sdr_dds = &v->sdr;
			*ddr_dds = &v->ddr;
			*qdr_dds = &v->qdr;
			return;
		}
	}

	/*
	 * Active cables don't have attenuation so we only set SERDES
	 * settings to account for the attenuation of the board traces.
	 */
	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
		return;
	}

	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
						      qd->atten[1])) {
		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
		return;
	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
		/*
		 * If we have no (or incomplete) data from the cable
		 * EEPROM, or no QSFP, or override is set, use the
		 * module parameter value to index into the attenuation
		 * table.
		 */
		idx = ppd->cpspec->no_eep;
		*sdr_dds = &txdds_sdr[idx];
		*ddr_dds = &txdds_ddr[idx];
		*qdr_dds = &txdds_qdr[idx];
	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
		/* similar to above, but index into the "extra" table. */
		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
		*sdr_dds = &txdds_extra_sdr[idx];
		*ddr_dds = &txdds_extra_ddr[idx];
		*qdr_dds = &txdds_extra_qdr[idx];
	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
					  TXDDS_MFG_SZ)) {
		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
		printk(KERN_INFO QIB_DRV_NAME
			" IB%u:%u use idx %u into txdds_mfg\n",
			ppd->dd->unit, ppd->port, idx);
		*sdr_dds = &txdds_extra_mfg[idx];
		*ddr_dds = &txdds_extra_mfg[idx];
		*qdr_dds = &txdds_extra_mfg[idx];
	} else {
		/* this shouldn't happen, it's range checked */
		*sdr_dds = txdds_sdr + qib_long_atten;
		*ddr_dds = txdds_ddr + qib_long_atten;
		*qdr_dds = txdds_qdr + qib_long_atten;
	}
}
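
/*
 * Selection precedence implemented above: a vendor OUI/part-number match
 * in vendor_txdds wins; otherwise active cables use the board-trace
 * attenuation, cables reporting attenuation in their EEPROM index the
 * tables by that value, the txselect module parameter (no_eep) indexes
 * the main/extra/mfg tables, and anything else falls back to
 * qib_long_atten.
 */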

static void init_txdds_table(struct qib_pportdata *ppd, int override)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;
	int idx;
	int single_ent = 0;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);

	/* for mez cards or override, use the selected value for all entries */
	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
		single_ent = 1;

	/* Fill in the first entry with the best entry found. */
	set_txdds(ppd, 0, sdr_dds);
	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
			   QIBL_LINKACTIVE)) {
		dds = (struct txdds_ent *)(ppd->link_speed_active ==
					   QIB_IB_QDR ? qdr_dds :
					   (ppd->link_speed_active ==
					    QIB_IB_DDR ? ddr_dds : sdr_dds));
		write_tx_serdes_param(ppd, dds);
	}

	/* Fill in the remaining entries with the default table values. */
	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
			  single_ent ? ddr_dds : txdds_ddr + idx);
		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
			  single_ent ? qdr_dds : txdds_qdr + idx);
	}
}

#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
#define AHB_TRANS_TRIES 10

/*
 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
 * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
 * for the channel argument.
 */
static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
		   u32 data, u32 mask)
{
	u32 rd_data, wr_data, sz_mask;
	u64 trans, acc, prev_acc;
	u32 ret = 0xBAD0BAD;
	int tries;

	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
	/* From this point on, make sure we return access */
	acc = (quad << 1) | 1;
	qib_write_kreg(dd, KR_AHB_ACC, acc);

	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		if (trans & AHB_TRANS_RDY)
			break;
	}
	if (tries >= AHB_TRANS_TRIES) {
		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
		goto bail;
	}

	/*
	 * If mask is not all 1s, we need to read, but different SerDes
	 * entities have different sizes
	 */
	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
	wr_data = data & mask & sz_mask;
	if ((~mask & sz_mask) != 0) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
		/* Re-read in case host split reads and read data first */
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
		wr_data |= (rd_data & ~mask & sz_mask);
	}

	/* If mask is not zero, we need to write. */
	if (mask & sz_mask) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
		trans |= AHB_WR;
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
	}
	ret = wr_data;
bail:
	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
	return ret;
}
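
/*
 * Usage note: ahb_mod() is a read-modify-write primitive. A mask of 0
 * performs a pure read (the returned value is the current register
 * contents), an all-ones mask is a pure write, and anything in between
 * merges 'data' into the bits selected by 'mask', preserving the rest.
 */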
static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
			     unsigned mask)
{
	struct qib_devdata *dd = ppd->dd;
	int chan;
	u32 rbc;

	for (chan = 0; chan < SERDES_CHANS; ++chan) {
		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
			data, mask);
		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			      addr, 0, 0);
	}
}
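
/*
 * Note that (chan + (chan >> 1)) above maps SerDes channels 0-3 to AHB
 * channel codes 0, 1, 3 and 4, skipping code 2 (the PLL); the second
 * ahb_mod() call, with a zero mask, is a read-back of the address just
 * written.
 */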

static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);

	if (enable && !state) {
		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
			ppd->dd->unit, ppd->port);
		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	} else if (!enable && state) {
		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
			ppd->dd->unit, ppd->port);
		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	}
	qib_write_kreg_port(ppd, krp_serdesctrl, data);
}

static int serdes_7322_init(struct qib_pportdata *ppd)
{
	int ret = 0;

	if (ppd->dd->cspec->r1)
		ret = serdes_7322_init_old(ppd);
	else
		ret = serdes_7322_init_new(ppd);
	return ret;
}

static int serdes_7322_init_old(struct qib_pportdata *ppd)
{
	u32 le_val;

	/*
	 * Initialize the Tx DDS tables.  Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	/* Patch some SerDes defaults to "Better for IB" */
	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));

	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));

	/* May be overridden in qsfp_7322_event */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));

	/* enable LE1 adaptation for all but QME, which is disabled */
	le_val = IS_QME(ppd->dd) ? 0 : 1;
	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data: SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
	serdes_7322_los_enable(ppd, 1);

	/* rxbistena; set 0 to avoid effects of it switching later */
	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);

	/* Configure 4 DFE taps, and only they adapt */
	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));

	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);

	/*
	 * Set receive adaptation mode.  SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;

	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
	/* rx offset center enabled */
	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);

	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
	}

	/* Set the frequency loop bandwidth to 15 */
	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));

	return 0;
}

static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
	unsigned long tend;
	u32 le_val, rxcaldone;
	int chan, chan_done = (1 << SERDES_CHANS) - 1;

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	/* START OF LSI SUGGESTED SERDES BRINGUP */
	/* Reset - Calibration Setup */
	/* Stop DFE adaptation */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
	/* Disable LE1 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
	/* Disable autoadapt for LE1 */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
	/* Disable LE2 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
	/* Disable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	/* Disable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
	/* Disable Timing Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
	/* Disable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
	/* Disable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
	/* Disable RX Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	/* Disable RX Offset Calibration */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
	/* Select BB CDR */
	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
	/* CDR Step Size */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
	/* Enable phase Calibration */
	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
	/* DFE Bandwidth [2:14-12] */
	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
	/* DFE Config (4 taps only) */
	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
	/* Gain Loop Bandwidth */
	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
	} else {
		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
	}
	/* Baseline Wander Correction Gain [13:4-0] (leave as default) */
	/* Baseline Wander Correction Gain [3:7-5] (leave as default) */
	/* Data Rate Select [5:7-6] (leave as default) */
	/* RX Parallel Word Width [3:10-8] (leave as default) */

	/* RX RESET */
	/* Single- or Multi-channel reset */
	/* RX Analog reset */
	/* RX Digital reset */
	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
	msleep(20);
	/* RX Analog reset */
	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
	msleep(20);
	/* RX Digital reset */
	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
	msleep(20);

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data: SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	/* Turn on LOS on initial SERDES init */
	serdes_7322_los_enable(ppd, 1);
	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);

	/* RX LATCH CALIBRATION */
	/* Enable Eyefinder Phase Calibration latch */
	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
	/* Enable RX Offset Calibration latch */
	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
	msleep(20);
	/* Start Calibration */
	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
	tend = jiffies + msecs_to_jiffies(500);
	while (chan_done && !time_is_before_jiffies(tend)) {
		msleep(20);
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
			    (~chan_done & (1 << chan)) == 0)
				chan_done &= ~(1 << chan);
		}
	}
	if (chan_done) {
		printk(KERN_INFO QIB_DRV_NAME
			" Serdes %d calibration not done after .5 sec: 0x%x\n",
			IBSD(ppd->hw_pidx), chan_done);
	} else {
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
				printk(KERN_INFO QIB_DRV_NAME
					" Serdes %d chan %d calibration failed\n",
					IBSD(ppd->hw_pidx), chan);
		}
	}

	/* Turn off Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	msleep(20);

	/* BRING RX UP */
	/* Set LE2 value (May be overridden in qsfp_7322_event) */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
	/* Set LE2 Loop bandwidth */
	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
	/* Enable LE2 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
	msleep(20);
	/* Enable H0 only */
	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
	/* Enable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	msleep(20);
	/* Set Frequency Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
	/* Enable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
	/* Set Timing Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
	/* Enable Timing Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
	msleep(50);
	/*
	 * Enable DFE:
	 * Set receive adaptation mode.  SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;
	/* Disable LE1 */
	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
	/* Disable auto adapt for LE1 */
	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
	msleep(20);
	/* Enable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
	/* Enable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* VGA output common mode */
	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));

	/*
	 * Initialize the Tx DDS tables.  Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	return 0;
}

/* start adjust QMH serdes parameters */

static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
{
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		9, code << 9, 0x3f << 9);
}

static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
			    int enable, u32 tapenable)
{
	if (enable)
		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			1, 3 << 10, 0x1f << 10);
	else
		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			1, 0, 0x1f << 10);
}

/* Set clock to 1, 0, 1, 0 */
static void clock_man(struct qib_pportdata *ppd, int chan)
{
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0x4000, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0x4000, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0, 0x4000);
}

/*
 * Write the current Tx serdes pre, post, main and amp settings into the
 * serdes. The caller must pass the settings appropriate for the current
 * speed, or not care if they are correct for the current speed.
 */
static void write_tx_serdes_param(struct qib_pportdata *ppd,
				  struct txdds_ent *txdds)
{
	u64 deemph;

	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
	/* field names for amp, main, post, pre, respectively */
	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));

	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
			   tx_override_deemphasis_select);
	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					       txampcntl_d2a);
	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		    txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					  txc0_ena);
	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		    txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					   txcp1_ena);
	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		    txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					   txcn1_ena);
	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
}
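
/*
 * Note on the packing above: each table value is first masked to its
 * register field width (SYM_RMASK) and then shifted into position
 * (SYM_LSB), so an out-of-range table value cannot corrupt a
 * neighboring field of the override register.
 */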

/*
 * Set the parameters for mez cards on link bounce, so they are
 * always exactly what was requested.  Similar logic to init_txdds
 * but does just the serdes.
 */
static void adj_tx_serdes(struct qib_pportdata *ppd)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
			   ddr_dds : sdr_dds));
	write_tx_serdes_param(ppd, dds);
}

/* set QDR forced value for H1, if needed */
static void force_h1(struct qib_pportdata *ppd)
{
	int chan;

	ppd->cpspec->qdr_reforce = 0;
	if (!ppd->dd->cspec->r1)
		return;

	for (chan = 0; chan < SERDES_CHANS; chan++) {
		set_man_mode_h1(ppd, chan, 1, 0);
		set_man_code(ppd, chan, ppd->cpspec->h1_val);
		clock_man(ppd, chan);
		set_man_mode_h1(ppd, chan, 0, 0);
	}
}

#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)

#define R_OPCODE_LSB 3
#define R_OP_NOP 0
#define R_OP_SHIFT 2
#define R_OP_UPDATE 3
#define R_TDI_LSB 2
#define R_TDO_LSB 1
#define R_RDY 1

static int qib_r_grab(struct qib_devdata *dd)
{
	u64 val;

	val = SJA_EN;
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	return 0;
}

/*
 * qib_r_wait_for_rdy() not only waits for the ready bit, it
 * returns the current state of R_TDO
 */
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
	u64 val;
	int timeout;

	for (timeout = 0; timeout < 100; ++timeout) {
		val = qib_read_kreg32(dd, kr_r_access);
		if (val & R_RDY)
			return (val >> R_TDO_LSB) & 1;
	}
	return -1;
}

static int qib_r_shift(struct qib_devdata *dd, int bisten,
		       int len, u8 *inp, u8 *outp)
{
	u64 valbase, val;
	int ret, pos;

	valbase = SJA_EN | (bisten << BISTEN_LSB) |
		(R_OP_SHIFT << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret < 0)
		goto bail;
	for (pos = 0; pos < len; ++pos) {
		val = valbase;
		if (outp) {
			outp[pos >> 3] &= ~(1 << (pos & 7));
			outp[pos >> 3] |= (ret << (pos & 7));
		}
		if (inp) {
			int tdi = inp[pos >> 3] >> (pos & 7);

			val |= ((tdi & 1) << R_TDI_LSB);
		}
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
		ret = qib_r_wait_for_rdy(dd);
		if (ret < 0)
			break;
	}
	/* Restore to NOP between operations. */
	val = SJA_EN | (bisten << BISTEN_LSB);
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	ret = qib_r_wait_for_rdy(dd);

	if (ret >= 0)
		ret = pos;
bail:
	return ret;
}
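
/*
 * Bit ordering for qib_r_shift(): bit 'pos' of the stream lives at
 * inp/outp[pos >> 3], bit (pos & 7), i.e. LSB-first within each byte.
 * Note that the TDO bit stored at position 'pos' is the value sampled
 * by the ready-wait that precedes clocking out that position's TDI bit.
 */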

static int qib_r_update(struct qib_devdata *dd, int bisten)
{
	u64 val;
	int ret;

	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret >= 0) {
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	return ret;
}

#define BISTEN_PORT_SEL 15
#define LEN_PORT_SEL 625
#define BISTEN_AT 17
#define LEN_AT 156
#define BISTEN_ETM 16
#define LEN_ETM 632

#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
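
/*
 * BIT2BYTE() rounds a bit count up to whole bytes; for example, the
 * 156-bit LEN_AT chains below occupy BIT2BYTE(156) == 20 bytes, which
 * matches the initializers that follow.
 */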

/* these are common for all IB port use cases. */
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
};

static u8 at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

/* used for IB1 or IB2, only one in use */
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
};

/* used when both IB1 and IB2 are in use */
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
};

/* used when only IB1 is in use */
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* used when only IB2 is in use */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};

/* used when both IB1 and IB2 are in use */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Do setup to properly handle IB link recovery; if port is zero, we
 * are initializing to cover both ports; otherwise we are initializing
 * to cover a single port card, or the port has reached INIT and we may
 * need to switch coverage types.
 */
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
{
	u8 *portsel, *etm;
	struct qib_devdata *dd = ppd->dd;

	if (!ppd->dd->cspec->r1)
		return;
	if (!both) {
		dd->cspec->recovery_ports_initted++;
		ppd->cpspec->recovery_init = 1;
	}
	if (!both && dd->cspec->recovery_ports_initted == 1) {
		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
		etm = atetm_1port;
	} else {
		portsel = portsel_2port;
		etm = atetm_2port;
	}

	if (qib_r_grab(dd) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
			portsel, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0)
		qib_dev_err(dd, "Failed IB link recovery setup\n");
}

static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 fmask;

	if (dd->cspec->recovery_ports_initted != 1)
		return; /* rest doesn't apply to dualport */
	qib_write_kreg(dd, kr_control, dd->control |
		       SYM_MASK(Control, FreezeMode));
	(void)qib_read_kreg64(dd, kr_scratch);
	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask) {
		/*
		 * require a powercycle before we'll work again, and make
		 * sure we get no more interrupts, and don't turn off
		 * freeze.
		 */
		ppd->dd->cspec->stay_in_freeze = 1;
		qib_7322_set_intr_state(ppd->dd, 0);
		qib_write_kreg(dd, kr_fmask, 0ULL);
		qib_dev_err(dd, "HCA unusable until powercycled\n");
		return; /* eventually reset */
	}

	qib_write_kreg(ppd->dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

	/* don't do the full clear_freeze(), not needed for this */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);
	/* take IBC out of reset */
	if (ppd->link_speed_supported) {
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_read_kreg32(dd, kr_scratch);
		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	}
}