qib_iba7322.c

/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"
#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
				  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb)	(((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
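/*
 * Worked example (illustrative, not in the original source): BMASK(7, 4)
 * expands to ((1 << 4) - 1) << 4 == 0xf0, i.e. a contiguous mask covering
 * bits 7..4 inclusive.
 */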
/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation, \
		 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
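/*
 * Usage sketch (illustrative, not from this file): the S_IRUGO-only
 * parameters above are set at module load time, e.g.
 *	modprobe ib_qib num_vls=4 singleport=1 rcvhdrcnt=4096
 * and can then be read back, but not changed, via
 * /sys/module/ib_qib/parameters/.
 */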
#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
	.string = txselect_list,
	.maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
		  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect, \
		 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QME7342)

#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))

#define MASK_ACROSS(lsb, msb) \
	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
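/*
 * Worked example (illustrative, not in the original source):
 * MASK_ACROSS(0, 3) expands to ((1ULL << 4) - 1) << 0 == 0xfULL.
 * Unlike BMASK it is 64 bits wide, so it can cover fields above bit 31.
 */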
#define SYM_RMASK(regname, fldname) ((u64)              \
	QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64)               \
	QIB_7322_##regname##_##fldname##_RMASK <<       \
	 QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64)	\
	(((value) >> SYM_LSB(regname, fldname)) &	\
	 SYM_RMASK(regname, fldname)))

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
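/*
 * Worked example (illustrative, not in the original source): given a raw
 * IBCStatusA_0 register value in "status",
 *	SYM_FIELD(status, IBCStatusA_0, LinkSpeedActive)
 * shifts right by the machine-generated
 * QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB and masks with the matching
 * _RMASK, yielding just the LinkSpeedActive field.
 */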
/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */

#define SendIBSLIDAssignMask \
	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
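/*
 * Worked example (illustrative; assumes the usual one-bit-per-speed
 * encoding of QIB_IB_SDR/DDR/QDR in qib.h, i.e. 1, 2 and 4):
 * PORT_SPD_CAP is then 0x7 and DUAL_PORT_CAP is 0x3f, with the low
 * three bits describing port 1 and the next three bits port 2.
 */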
/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */

/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS  ( \
	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
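/*
 * Note (added for clarity): W1C means write-1-to-clear, so writing
 * TIDFLOW_ERRBITS back to a flow table entry clears any latched
 * GenMismatch/SeqMismatch indications, while the 0 bits in the written
 * value leave the entry's other fields untouched.
 */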
/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
	((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
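/*
 * Sketch (illustrative only; the real composition is done by
 * qib_7322_setpbc_control(), declared above): a PBC word selects the
 * output port and VL alongside the packet length, roughly
 *	pbc |= (u64)port << PBC_PORT_SEL_LSB;
 *	pbc |= (u64)vlnum << PBC_VL_NUM_LSB;
 * with PBC_7322_VL15_SEND set for VL15 packets so they bypass the
 * credit check.
 */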
static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};
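/*
 * Note (observation, not from the original comments): the delay values
 * scale inversely with rate relative to 40 Gb/s, i.e. roughly 40/rate
 * rounded up: 40/2.5 = 16, 40/5 = 8, 40/10 = 4, 40/20 = 2,
 * 40/30 -> 2, 40/40 = 1.
 */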
  411. #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
  412. #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
  413. /* link training states, from IBC */
  414. #define IB_7322_LT_STATE_DISABLED 0x00
  415. #define IB_7322_LT_STATE_LINKUP 0x01
  416. #define IB_7322_LT_STATE_POLLACTIVE 0x02
  417. #define IB_7322_LT_STATE_POLLQUIET 0x03
  418. #define IB_7322_LT_STATE_SLEEPDELAY 0x04
  419. #define IB_7322_LT_STATE_SLEEPQUIET 0x05
  420. #define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
  421. #define IB_7322_LT_STATE_CFGRCVFCFG 0x09
  422. #define IB_7322_LT_STATE_CFGWAITRMT 0x0a
  423. #define IB_7322_LT_STATE_CFGIDLE 0x0b
  424. #define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
  425. #define IB_7322_LT_STATE_TXREVLANES 0x0d
  426. #define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
  427. #define IB_7322_LT_STATE_RECOVERIDLE 0x0f
  428. #define IB_7322_LT_STATE_CFGENH 0x10
  429. #define IB_7322_LT_STATE_CFGTEST 0x11
  430. #define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
  431. #define IB_7322_LT_STATE_CFGWAITENH 0x13
  432. /* link state machine states from IBC */
  433. #define IB_7322_L_STATE_DOWN 0x0
  434. #define IB_7322_L_STATE_INIT 0x1
  435. #define IB_7322_L_STATE_ARM 0x2
  436. #define IB_7322_L_STATE_ACTIVE 0x3
  437. #define IB_7322_L_STATE_ACT_DEFER 0x4
static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
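/*
 * Entries not explicitly initialized above (e.g. 0x06, 0x07, 0x18-0x1f)
 * default to 0; the table is sized 0x20 to cover the full 5-bit
 * LinkTrainingState field extracted from IBCStatusA.
 */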
#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
	int rcv;
	void *arg;
	struct irq_affinity_notify notify;
};
#endif
struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 main_int_mask; /* clear bits which have dedicated handlers */
	u64 int_enable_mask; /* for per port interrupts in single port mode */
	u64 errormask;
	u64 hwerrmask;
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	u32 ncntrs;
	u32 nportcntrs;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 numctxts;
	u32 rcvegrcnt;
	u32 updthresh; /* current AvailUpdThld */
	u32 updthresh_dflt; /* default AvailUpdThld */
	u32 r1;
	int irq;
	u32 num_msix_entries;
	u32 sdmabufcnt;
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	u32 dca_ctrl;
	int rhdr_cpu[18];
	int sdma_cpu[2];
	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
	struct qib_msix_entry *msix_entries;
	unsigned long *sendchkenable;
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128]; /* for device error interrupt msg buffer */
};
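/*
 * The rhdr_cpu[18] and rcvavail_timeout[18] arrays above are sized for
 * the chip's 18 receive contexts, matching the 0..17 context ranges in
 * the RcvUrg/RcvAvail interrupt masks defined below.
 */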
/* Table of Tx serdes emphasis settings entries, in "human readable" form. */
struct txdds_ent {
	u8 amp;
	u8 pre;
	u8 main;
	u8 post;
};

struct vendor_txdds_ent {
	u8 oui[QSFP_VOUI_LEN];
	u8 *partnum;
	struct txdds_ent sdr;
	struct txdds_ent ddr;
	struct txdds_ent qdr;
};
static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
	+ ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
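/*
 * Example: krp_static_adapt_dis(2) selects the QDR member of the
 * static-adaptation disable register pairs (spd 0 = SDR, 1 = DDR,
 * 2 = QDR, judging by the base register name); handle_serdes_issues()
 * below uses it with the QDR_STATIC_ADAPT_DOWN* values to re-enable
 * QDR adaptation on link down.
 */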
struct qib_chippport_specific {
	u64 __iomem *kpregbase;
	u64 __iomem *cpregbase;
	u64 *portcntrs;
	struct qib_pportdata *ppd;
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;
	/*
	 * These delta/snapshot fields are used to establish deltas for
	 * IB symbol errors and linkrecovery errors. They can be reported
	 * on some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
	unsigned long qdr_dfe_time;
	unsigned long chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;
	/*
	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	 * entry zero is unused, to simplify indexing
	 */
	u8 h1_val;
	u8 no_eep; /* txselect table index to use if no qsfp info */
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192]; /* for port error interrupt msg buffer */
	char sdmamsgbuf[192]; /* for per-port sdma error messages */
};
static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port; /* 0 if not port-specific, else port # */
	int dca;
} irq_table[] = {
	{ "", qib_7322intr, -1, 0, 0 },
	{ " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
	{ " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
	{ " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
	{ " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
	{ " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
	{ " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
	{ " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
	{ " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
	{ " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};
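/*
 * The first entry (empty name, lsb -1) is the general interrupt
 * handler, used when not running fully vectored MSI-X; the rest name
 * dedicated vectors. port is 1-based (0 = not port-specific), and the
 * dca flag appears to mark vectors that get DCA affinity notifiers
 * when CONFIG_INFINIBAND_QIB_DCA is enabled.
 */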
#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
	int shadow_inx;
	int lsb;
	u64 mask;
	u16 regno;
} dca_rcvhdr_reg_map[] = {
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH), KREG_IDX(DCACtrlB) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH), KREG_IDX(DCACtrlC) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH), KREG_IDX(DCACtrlD) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH), KREG_IDX(DCACtrlE) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH), KREG_IDX(DCACtrlF) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH), KREG_IDX(DCACtrlF) },
};
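/*
 * Each entry pairs a shadow index into cspec->dca_rcvhdr_ctrl[] (the
 * B..F shadows above) with the lsb/mask of one header queue's DCAOPH
 * field and the register to write: 18 rcvhdr queues, four per DCACtrl
 * register, with the last two in DCACtrlF.
 */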
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101
static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
#endif
/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from a valid zero register
 * value at runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}
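/*
 * Per-context user registers live at ureg_align * ctxt past either the
 * separately mapped userbase or, failing that, kregbase + uregbase;
 * regno then indexes 64-bit registers within that context's region.
 * qib_read_ureg() and qib_write_ureg() below assume the same layout.
 */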
/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from a valid zero register
 * value at runtime; we may add a separate error variable at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
				enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}
/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *) dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *) dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}
static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}
/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
				       const u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
}
/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}
static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readl(&ppd->cpspec->cpregbase[regno]);
}
/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
	 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
	QIB_I_SPIOSENT | \
	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
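/*
 * The BITSEXTANT masks enumerate every interrupt bit the driver knows
 * about, so that any unexpected status bits can be detected and
 * reported separately by the interrupt code.
 */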
/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per chip (rather than per-port) errors. Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or indicate errors with no recovery, where we still
 * want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/* SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism. This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
/*
 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
	QIB_E_P_SUNEXP_PKTNUM |\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMAXPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
	QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
	ERR_MASK_N(SendUnsupportedVLErr) | \
	QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAUNEXPDATA | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)
/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
	)

/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received. This doesn't cover things like
 * EBP or VCRC that can be the result of the link changing state while
 * a send is in flight, so that we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
	QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise, E_SPKT_ERRS_IGNORE is neutered (no error bits are ignored) */
#define E_SPKT_ERRS_IGNORE 0
#define QIB_EXTS_MEMBIST_DISABLED \
	SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
	SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
	ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled). It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
	.msg = #fldname , .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
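/*
 * Example: HWE_AUTO_P(SDmaMemReadErr, 1) expands to an entry whose
 * .mask is SYM_MASK(HwErrMask, SDmaMemReadErrMask_1) and whose .msg is
 * "SDmaMemReadErr"; err_decode() below walks tables of these entries.
 */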
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
	HWE_AUTO(PCIESerdesPClkNotDetect),
	HWE_AUTO(PowerOnBISTFailed),
	HWE_AUTO(TempsenseTholdReached),
	HWE_AUTO(MemoryErr),
	HWE_AUTO(PCIeBusParityErr),
	HWE_AUTO(PcieCplTimeout),
	HWE_AUTO(PciePoisonedTLP),
	HWE_AUTO_P(SDmaMemReadErr, 1),
	HWE_AUTO_P(SDmaMemReadErr, 0),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
	HWE_AUTO(statusValidNoEop),
	HWE_AUTO(LATriggered),
	{ .mask = 0, .sz = 0 }
};
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }

static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
	E_AUTO(RcvEgrFullErr),
	E_AUTO(RcvHdrFullErr),
	E_AUTO(ResetNegated),
	E_AUTO(HardwareErr),
	E_AUTO(InvalidAddrErr),
	E_AUTO(SDmaVL15Err),
	E_AUTO(SBufVL15MisUseErr),
	E_AUTO(InvalidEEPCmd),
	E_AUTO(RcvContextShareErr),
	E_AUTO(SendVLMismatchErr),
	E_AUTO(SendArmLaunchErr),
	E_AUTO(SendSpecialTriggerErr),
	E_AUTO(SDmaWrongPortErr),
	E_AUTO(SDmaBufMaskDuplicateErr),
	{ .mask = 0, .sz = 0 }
};
static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
	E_P_AUTO(IBStatusChanged),
	E_P_AUTO(SHeadersErr),
	E_P_AUTO(VL15BufMisuseErr),
	/*
	 * SDmaHaltErr is not really an error, so rename it to make
	 * that clearer.
	 */
	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
	 .sz = 11},
	E_P_AUTO(SDmaDescAddrMisalignErr),
	E_P_AUTO(SDmaUnexpDataErr),
	E_P_AUTO(SDmaMissingDwErr),
	E_P_AUTO(SDmaDwEnErr),
	E_P_AUTO(SDmaRpyTagErr),
	E_P_AUTO(SDma1stDescErr),
	E_P_AUTO(SDmaBaseErr),
	E_P_AUTO(SDmaTailOutOfBoundErr),
	E_P_AUTO(SDmaOutOfBoundErr),
	E_P_AUTO(SDmaGenMismatchErr),
	E_P_AUTO(SendBufMisuseErr),
	E_P_AUTO(SendUnsupportedVLErr),
	E_P_AUTO(SendUnexpectedPktNumErr),
	E_P_AUTO(SendDroppedDataPktErr),
	E_P_AUTO(SendDroppedSmpPktErr),
	E_P_AUTO(SendPktLenErr),
	E_P_AUTO(SendUnderRunErr),
	E_P_AUTO(SendMaxPktLenErr),
	E_P_AUTO(SendMinPktLenErr),
	E_P_AUTO(RcvIBLostLinkErr),
	E_P_AUTO(RcvHdrErr),
	E_P_AUTO(RcvHdrLenErr),
	E_P_AUTO(RcvBadTidErr),
	E_P_AUTO(RcvBadVersionErr),
	E_P_AUTO(RcvIBFlowErr),
	E_P_AUTO(RcvEBPErr),
	E_P_AUTO(RcvUnsupportedVLErr),
	E_P_AUTO(RcvUnexpectedCharErr),
	E_P_AUTO(RcvShortPktLenErr),
	E_P_AUTO(RcvLongPktLenErr),
	E_P_AUTO(RcvMaxPktLenErr),
	E_P_AUTO(RcvMinPktLenErr),
	E_P_AUTO(RcvICRCErr),
	E_P_AUTO(RcvVCRCErr),
	E_P_AUTO(RcvFormatErr),
	{ .mask = 0, .sz = 0 }
};
/*
 * Below generates "auto-message" for interrupts not specific to any port or
 * context
 */
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_0), \
	SYM_LSB(IntMask, fldname##Mask##_1)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_1), \
	SYM_LSB(IntMask, fldname##Mask##_0)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
 * Below generates "auto-message" for interrupts specific to a context,
 * with ctxt-number appended
 */
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##0IntMask), \
	SYM_LSB(IntMask, fldname##17IntMask)), \
	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }

static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
	INTR_AUTO_P(SDmaInt),
	INTR_AUTO_P(SDmaProgressInt),
	INTR_AUTO_P(SDmaIdleInt),
	INTR_AUTO_P(SDmaCleanupDone),
	INTR_AUTO_C(RcvUrg),
	INTR_AUTO_P(ErrInt),
	INTR_AUTO(ErrInt), /* non-port-specific errs */
	INTR_AUTO(AssertGPIOInt),
	INTR_AUTO_P(SendDoneInt),
	INTR_AUTO(SendBufAvailInt),
	INTR_AUTO_C(RcvAvail),
	{ .mask = 0, .sz = 0 }
};
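/*
 * Order matters in these tables: err_decode() clears each bit as it is
 * matched, so the port-specific INTR_AUTO_P(ErrInt) entry above must
 * precede the device-wide INTR_AUTO(ErrInt) entry to claim the per-port
 * error bits first.
 */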
#define TXSYMPTOM_AUTO_P(fldname) \
	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
	.msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
	TXSYMPTOM_AUTO_P(NonKeyPacket),
	TXSYMPTOM_AUTO_P(GRHFail),
	TXSYMPTOM_AUTO_P(PkeyFail),
	TXSYMPTOM_AUTO_P(QPFail),
	TXSYMPTOM_AUTO_P(SLIDFail),
	TXSYMPTOM_AUTO_P(RawIPV6),
	TXSYMPTOM_AUTO_P(PacketTooSmall),
	{ .mask = 0, .sz = 0 }
};
#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer so it can be re-used;
 * we don't need to force an update of pioavail here.
 */
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 i;
	int any;
	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long sbuf[4];

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	any = 0;
	for (i = 0; i < regcnt; ++i) {
		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
		if (sbuf[i]) {
			any = 1;
			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
		}
	}

	if (any)
		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
}
/* No txe_recover yet, if ever */

/* No decode_errors yet */
static void err_decode(char *msg, size_t len, u64 errs,
		       const struct qib_hwerror_msgs *msp)
{
	u64 these, lmask;
	int took, multi, n = 0;

	while (errs && msp && msp->mask) {
		multi = (msp->mask & (msp->mask - 1));
		while (errs & msp->mask) {
			these = (errs & msp->mask);
			lmask = (these & (these - 1)) ^ these;
			if (len) {
				if (n++) {
					/* separate the strings */
					*msg++ = ',';
					len--;
				}
				BUG_ON(!msp->sz);
				/* msp->sz counts the nul */
				took = min_t(size_t, msp->sz - (size_t)1, len);
				memcpy(msg, msp->msg, took);
				len -= took;
				msg += took;
				if (len)
					*msg = '\0';
			}
			errs &= ~lmask;
			if (len && multi) {
				/* More than one bit this mask */
				int idx = -1;

				while (lmask & msp->mask) {
					++idx;
					lmask >>= 1;
				}
				took = scnprintf(msg, len, "_%d", idx);
				len -= took;
				msg += took;
			}
		}
		++msp;
	}
	/* If some bits are left, show in hex. */
	if (len && errs)
		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
			 (unsigned long long) errs);
}
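/*
 * Example: decoding errs = QIB_E_P_RICRC | QIB_E_P_RVCRC against
 * qib_7322p_error_msgs yields "RcvICRCErr,RcvVCRCErr". For an entry
 * whose mask covers several bits (e.g. the INTR_AUTO_C tables), the
 * bit's offset within the mask is appended, e.g. "RcvAvail_C_5" for
 * context 5.
 */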
/* only called if r1 set */
static void flush_fifo(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *piobuf;
	u32 bufn;
	u32 *hdr;
	u64 pbc;
	const unsigned hdrwords = 7;
	static struct qib_ib_header ibhdr = {
		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
		.lrh[1] = IB_LID_PERMISSIVE,
		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
		.lrh[3] = IB_LID_PERMISSIVE,
		.u.oth.bth[0] = cpu_to_be32(
			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
		.u.oth.bth[1] = cpu_to_be32(0),
		.u.oth.bth[2] = cpu_to_be32(0),
		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
	};

	/*
	 * Send a dummy VL15 packet to flush the launch FIFO.
	 * This will not actually be sent since the TxeBypassIbc bit is set.
	 */
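	/*
	 * The port-select field is shifted up by 32 here because the
	 * upper word of the 64-bit PBC is the control word; compare
	 * PBC_7322_VL15_SEND (bit 63) with its bit-31 "control version"
	 * PBC_7322_VL15_SEND_CTRL defined above.
	 */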
	pbc = PBC_7322_VL15_SEND |
		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
		(hdrwords + SIZE_OF_CRC);

	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
	if (!piobuf)
		return;
	writeq(pbc, piobuf);
	hdr = (u32 *) &ibhdr;
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
		qib_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
		qib_flush_wc();
	} else
		qib_pio_copy(piobuf + 2, hdr, hdrwords);
	qib_sendbuf_done(dd, bufn);
}
/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 set_sendctrl = 0;
	u64 clr_sendctrl = 0;

	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);

	spin_lock(&dd->sendctrl_lock);

	/* If we are draining everything, block sends first */
	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
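		/* a scratch write is presumed to flush the preceding
		 * sendctrl write out to the chip before we proceed */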
		qib_write_kreg(dd, kr_scratch, 0);
	}

	ppd->p_sendctrl |= set_sendctrl;
	ppd->p_sendctrl &= ~clr_sendctrl;

	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
		qib_write_kreg_port(ppd, krp_sendctrl,
				    ppd->p_sendctrl |
				    SYM_MASK(SendCtrl_0, SDmaCleanup));
	else
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock(&dd->sendctrl_lock);

	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
		flush_fifo(ppd);
}
static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}

static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
	/*
	 * Set SendDmaLenGen and then clear and set the MSB of the
	 * generation count, to enable generation checking and load the
	 * internal generation counter.
	 */
	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg_port(ppd, krp_senddmalengen,
			    ppd->sdma_descq_cnt |
			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}
/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg_port(ppd, krp_senddmatail, tail);
}

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	/*
	 * Drain all FIFOs.
	 * The hardware doesn't require this but we do it so that verbs
	 * and user applications don't wait for link active to send stale
	 * data.
	 */
	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);

	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	ppd->sdma_head_dma[0] = 0;
	qib_7322_sdma_sendctrl(ppd,
		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
}
#define DISABLES_SDMA ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)
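/*
 * Note: DISABLES_SDMA is QIB_E_P_SDMAERRS minus SDmaUnexpDataErr,
 * which instead gets its own qib_dev_err() report in
 * sdma_7322_p_errors() below.
 */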
static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
{
	unsigned long flags;
	struct qib_devdata *dd = ppd->dd;

	errs &= QIB_E_P_SDMAERRS;
	if (errs & QIB_E_P_SDMAUNEXPDATA)
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
			    ppd->port);

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	if (errs != QIB_E_P_SDMAHALT) {
		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
		qib_dev_porterr(dd, ppd->port,
			"SDMA %s 0x%016llx %s\n",
			qib_sdma_state_names[ppd->sdma_state.current_state],
			errs, ppd->cpspec->sdmamsgbuf);
		dump_sdma_7322_state(ppd);
	}

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e20_hw_started);
		break;

	case qib_sdma_state_s20_idle:
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e50_hw_cleaned);
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e60_hw_halted);
		break;

	case qib_sdma_state_s99_running:
		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
		break;
	}

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
/*
 * handle per-device errors (not per-port errors)
 */
static noinline void handle_7322_errors(struct qib_devdata *dd)
{
	char *msg;
	u64 iserr = 0;
	u64 errs;
	u64 mask;
	int log_idx;

	qib_stats.sps_errints++;
	errs = qib_read_kreg64(dd, kr_errstatus);
	if (!errs) {
		qib_devinfo(dd->pcidev,
			"device error interrupt, but no error bits set!\n");
		goto done;
	}

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* do these first, they are most important */
	if (errs & QIB_E_HARDWARE) {
		*msg = '\0';
		qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
	} else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & QIB_E_SPKTERRS) {
		qib_disarm_7322_senderrbufs(dd->pport);
		qib_stats.sps_txerrs++;
	} else if (errs & QIB_E_INVALIDADDR)
		qib_stats.sps_txerrs++;
	else if (errs & QIB_E_ARMLAUNCH) {
		qib_stats.sps_txerrs++;
		qib_disarm_7322_senderrbufs(dd->pport);
	}
	qib_write_kreg(dd, kr_errclear, errs);

	/*
	 * The ones we mask off are handled specially below
	 * or above. Also mask SDMADISABLED by default as it
	 * is too chatty.
	 */
	mask = QIB_E_HARDWARE;
	*msg = '\0';

	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
		   qib_7322error_msgs);

	/*
	 * Getting reset is a tragedy for all ports. Mark the device
	 * _and_ the ports as "offline" in a way meaningful to each.
	 */
	if (errs & QIB_E_RESET) {
		int pidx;

		qib_dev_err(dd,
			"Got reset, requires re-init (unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED; /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_err(dd, "%s error\n", msg);

	/*
	 * If there were hdrq or egrfull errors, wake up any processes
	 * waiting in poll. We used to try to check which contexts had
	 * the overflow, but given the cost of that and the chip reads
	 * to support it, it's better to just wake everybody up if we
	 * get an overflow; waiters can poll again if it's not them.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}

done:
	return;
}
static void qib_error_tasklet(unsigned long data)
{
	struct qib_devdata *dd = (struct qib_devdata *)data;

	handle_7322_errors(dd);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

static void reenable_chase(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	ppd->cpspec->chase_timer.expires = 0;
	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
			  u8 ibclt)
{
	ppd->cpspec->chase_end = 0;

	if (!qib_chase)
		return;

	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
	add_timer(&ppd->cpspec->chase_timer);
}
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
	u8 ibclt;
	unsigned long tnow;

	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);

	/*
	 * Detect and handle the state chase issue, where we can
	 * get stuck if we are unlucky on timing on both sides of
	 * the link. If we are, we disable, set a timer, and
	 * then re-enable.
	 */
	switch (ibclt) {
	case IB_7322_LT_STATE_CFGRCVFCFG:
	case IB_7322_LT_STATE_CFGWAITRMT:
	case IB_7322_LT_STATE_TXREVLANES:
	case IB_7322_LT_STATE_CFGENH:
		tnow = jiffies;
		if (ppd->cpspec->chase_end &&
		    time_after(tnow, ppd->cpspec->chase_end))
			disable_chase(ppd, tnow, ibclt);
		else if (!ppd->cpspec->chase_end)
			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
		break;
	default:
		ppd->cpspec->chase_end = 0;
		break;
	}

	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
	     ibclt == IB_7322_LT_STATE_LINKUP) &&
	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
		force_h1(ppd);
		ppd->cpspec->qdr_reforce = 1;
		if (!ppd->dd->cspec->r1)
			serdes_7322_los_enable(ppd, 0);
	} else if (ppd->cpspec->qdr_reforce &&
		   (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
		   (ibclt == IB_7322_LT_STATE_CFGENH ||
		    ibclt == IB_7322_LT_STATE_CFGIDLE ||
		    ibclt == IB_7322_LT_STATE_LINKUP))
		force_h1(ppd);

	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
	    ppd->link_speed_enabled == QIB_IB_QDR &&
	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
	     ibclt == IB_7322_LT_STATE_CFGENH ||
	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
		adj_tx_serdes(ppd);

	if (ibclt != IB_7322_LT_STATE_LINKUP) {
		u8 ltstate = qib_7322_phys_portstate(ibcst);
		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
					  LinkTrainingState);
		if (!ppd->dd->cspec->r1 &&
		    pibclt == IB_7322_LT_STATE_LINKUP &&
		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
			/* If the link went down (but not into recovery),
			 * turn LOS back on */
			serdes_7322_los_enable(ppd, 1);
		if (!ppd->cpspec->qdr_dfe_on &&
		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
			ppd->cpspec->qdr_dfe_on = 1;
			ppd->cpspec->qdr_dfe_time = 0;
			/* On link down, reenable QDR adaptation */
			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_DOWN_R1 :
					    QDR_STATIC_ADAPT_DOWN);
			pr_info(
				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
				ppd->dd->unit, ppd->port, ibclt);
		}
	}
}
static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);

/*
 * This is per-pport error handling.
 * It will likely get its own MSI-x interrupt (one for each port,
 * although just a single handler).
 */
static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
{
	char *msg;
	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
	struct qib_devdata *dd = ppd->dd;

	/* do this as soon as possible */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask)
		check_7322_rxe_status(ppd);

	errs = qib_read_kreg_port(ppd, krp_errstatus);
	if (!errs)
		qib_devinfo(dd->pcidev,
			"Port%d error interrupt, but no error bits set!\n",
			ppd->port);
	if (!fmask)
		errs &= ~QIB_E_P_IBSTATUSCHANGED;
	if (!errs)
		goto done;

	msg = ppd->cpspec->epmsgbuf;
	*msg = '\0';

	if (errs & ~QIB_E_P_BITSEXTANT) {
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
		if (!*msg)
			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
				 "no others");
		qib_dev_porterr(dd, ppd->port,
			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
			(errs & ~QIB_E_P_BITSEXTANT), msg);
		*msg = '\0';
	}

	if (errs & QIB_E_P_SHDR) {
		u64 symptom;

		/* determine cause, then write to clear */
		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
			   hdrchk_msgs);
		*msg = '\0';
		/* senderrbuf cleared in SPKTERRS below */
	}

	if (errs & QIB_E_P_SPKTERRS) {
		if ((errs & QIB_E_P_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time. The IB logic then complains that the packet
			 * isn't valid. We don't want to confuse people, so
			 * we just don't print them, except at debug
			 */
			err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
				   (errs & QIB_E_P_LINK_PKTERRS),
				   qib_7322p_error_msgs);
			*msg = '\0';
			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
		}
		qib_disarm_7322_senderrbufs(ppd);
	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid. We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
			   qib_7322p_error_msgs);
		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
		*msg = '\0';
	}

	qib_write_kreg_port(ppd, krp_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	if (errs & QIB_E_P_RPKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & QIB_E_P_SPKTERRS)
		qib_stats.sps_txerrs++;

	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);

	if (errs & QIB_E_P_SDMAERRS)
		sdma_7322_p_errors(ppd, errs);

	if (errs & QIB_E_P_IBSTATUSCHANGED) {
		u64 ibcs;
		u8 ltstate;

		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		ltstate = qib_7322_phys_portstate(ibcs);

		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			handle_serdes_issues(ppd, ibcs);
		if (!(ppd->cpspec->ibcctrl_a &
		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
			/*
			 * We got our interrupt, so init code should be
			 * happy and not try alternatives. Now squelch
			 * other "chatter" from link-negotiation (pre Init)
			 */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
		}

		/* Update our picture of width and speed from chip */
		ppd->link_width_active =
			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
			    IB_WIDTH_4X : IB_WIDTH_1X;
		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
			    QIB_IB_DDR : QIB_IB_SDR;

		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
		    IB_PHYSPORTSTATE_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		else
			/*
			 * Since going into a recovery state causes the link
			 * state to go down and since recovery is transitory,
			 * it is better if we "miss" ever seeing the link
			 * training state go into recovery (i.e., ignore this
			 * transition for link state special handling purposes)
			 * without updating lastibcstat.
			 */
			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
				qib_handle_e_ibstatuschanged(ppd, ibcs);
	}
	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);
done:
	return;
}
/* enable/disable chip from delivering interrupts */
static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
		/* cause any pending enabled interrupts to be re-delivered */
		qib_write_kreg(dd, kr_intclear, 0ULL);
		if (dd->cspec->num_msix_entries) {
			/* and same for MSIx */
			u64 val = qib_read_kreg64(dd, kr_intgranted);

			if (val)
				qib_write_kreg(dd, kr_intgranted, val);
		}
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}
  1820. /*
  1821. * Try to cleanup as much as possible for anything that might have gone
  1822. * wrong while in freeze mode, such as pio buffers being written by user
  1823. * processes (causing armlaunch), send errors due to going into freeze mode,
  1824. * etc., and try to avoid causing extra interrupts while doing so.
  1825. * Forcibly update the in-memory pioavail register copies after cleanup
  1826. * because the chip won't do it while in freeze mode (the register values
  1827. * themselves are kept correct).
  1828. * Make sure that we don't lose any important interrupts by using the chip
  1829. * feature that says that writing 0 to a bit in *clear that is set in
  1830. * *status will cause an interrupt to be generated again (if allowed by
  1831. * the *mask value).
  1832. * This is in chip-specific code because of all of the register accesses,
  1833. * even though the details are similar on most chips.
  1834. */
  1835. static void qib_7322_clear_freeze(struct qib_devdata *dd)
  1836. {
  1837. int pidx;
  1838. /* disable error interrupts, to avoid confusion */
  1839. qib_write_kreg(dd, kr_errmask, 0ULL);
  1840. for (pidx = 0; pidx < dd->num_pports; ++pidx)
  1841. if (dd->pport[pidx].link_speed_supported)
  1842. qib_write_kreg_port(dd->pport + pidx, krp_errmask,
  1843. 0ULL);
	/* also disable interrupts; errormask is sometimes overwritten */
	qib_7322_set_intr_state(dd, 0);

	/* clear the freeze, and be sure chip saw it */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);

	/*
	 * Force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends.  Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	/* We need to purge per-port errs and reset mask, too */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (!dd->pport[pidx].link_speed_supported)
			continue;
		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
	}
	qib_7322_set_intr_state(dd, 1);
}

/* no error handling to speak of */
/**
 * qib_7322_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Most hardware errors are catastrophic, but for right now,
 * we'll print them and continue.  We reuse the same message buffer as
 * qib_handle_errors() to avoid excessive stack usage.
 */
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 ctrl;
	int isfatal = 0;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		qib_dev_err(dd,
			"Read of hardware error status failed (all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/* Always clear the error status register, except BIST fail */
	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
		       ~HWE_MASK(PowerOnBISTFailed));

	hwerrs &= dd->cspec->hwerrmask;

	/* no EEPROM logging, yet */

	if (hwerrs)
		qib_devinfo(dd->pcidev,
			"Hardware error: hwerr=0x%llx (cleared)\n",
			(unsigned long long) hwerrs);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
		/*
		 * No recovery yet...
		 */
		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
		    dd->cspec->stay_in_freeze) {
			/*
			 * If any error bits that we aren't ignoring are set,
			 * only make the complaint once, in case the error is
			 * stuck or recurring and we get here multiple times.
			 * Force the link down, so the switch knows, and the
			 * LEDs are turned off.
			 */
			if (dd->flags & QIB_INITTED)
				isfatal = 1;
		} else
			qib_7322_clear_freeze(dd);
	}

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcpy(msg,
			"[Memory BIST test failed, InfiniPath hardware unusable]",
			msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);

	/* Ignore esoteric PLL failures et al. */

	qib_dev_err(dd, "%s hardware error\n", msg);

	if (hwerrs &
	    (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
	     SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
		int pidx = 0;
		int err;
		unsigned long flags;
		struct qib_pportdata *ppd = dd->pport;

		for (; pidx < dd->num_pports; ++pidx, ppd++) {
			err = 0;
			if (pidx == 0 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
				err++;
			if (pidx == 1 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
				err++;
			if (err) {
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				dump_sdma_7322_state(ppd);
				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			}
		}
	}

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd,
			"Fatal Hardware Error, no longer usable, SN %.16s\n",
			dd->serial);
		/*
		 * for /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}
/**
 * qib_7322_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * Now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occurred,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask.
 */
static void qib_7322_init_hwerrors(struct qib_devdata *dd)
{
	int pidx;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);
	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
			 QIB_EXTS_MEMBIST_ENDTEST)))
		qib_dev_err(dd, "MemBIST did not complete!\n");

	/* never clear BIST failure, so reported on each driver load */
	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    ~0ULL);
}

/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based.  There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
	} else
		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming that zero was a NOP.
 */
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC)
		 */
		qib_7322_mini_pcs_reset(ppd);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
	}

	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
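	/*
	 * The (write-only) link commands are merged into the single
	 * register write below; the ibcctrl_a shadow deliberately never
	 * retains them (see the "without linkcmd or linkinitcmd!" and
	 * "clear the linkinit cmds" notes in qib_7322_bringup_serdes()).
	 */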
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
			    mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);
}
/*
 * The total RCV buffer memory is 64KB, used for both ports, and is
 * in units of 64 bytes (same as IB flow control credit unit).
 * The consumedVL fields in the same registers are in 32-byte units!
 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
 * in krp_rxcreditvl15, rather than 10.
 */
#define RCV_BUF_UNITSZ 64
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * (dd)->num_pports))
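/*
 * Worked example with both ports in use: NUM_RCV_BUF_UNITS =
 * 65536 / (64 * 2) = 512 units per port.  set_vls() below gives VL15
 * (2 * 288 + 63) / 64 = 9 of them, leaving 503 to split evenly across
 * the operational data VLs, with any remainder going to VL0.
 */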

static void set_vls(struct qib_pportdata *ppd)
{
	int i, numvls, totcred, cred_vl, vl0extra;
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	numvls = qib_num_vls(ppd->vls_operational);
	/*
	 * Set up per-VL credits.  Below is a kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 9 credits, enough for two max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
	 */
	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
	totcred = NUM_RCV_BUF_UNITS(dd);
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	totcred -= cred_vl;
	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
	cred_vl = totcred / numvls;
	vl0extra = totcred - cred_vl * numvls;
	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
	for (i = 1; i < numvls; i++)
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
	for (; i < 8; i++) /* no buffer space for other VLs */
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
	/* Notify IBC that credits need to be recalculated */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
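	/*
	 * The set/flush/clear sequence above pulses CREDIT_CHANGE; the
	 * scratch write between the two ibsdtestiftx writes keeps them
	 * from collapsing into one (the same back-to-back-write concern
	 * noted elsewhere in this file).
	 */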
	for (i = 0; i < numvls; i++)
		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);

	/* Change the number of operational VLs */
	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
}

/*
 * The code that deals with actual SerDes is in serdes_7322_init().
 * Compared to the code for iba7220, it is minimal.
 */
static int serdes_7322_init(struct qib_pportdata *ppd);

/**
 * qib_7322_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 */
static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, guid, ibc;
	unsigned long flags;
	int ret = 0;

	/*
	 * SerDes model not in Pd, but still need to
	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
	 * eventually.
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	if (qib_compat_ddr_negotiate) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
						crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
						crp_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur.  Units are 128 ns for this chip;
	 * 24 * 128 ns = 3.072 usec, i.e. the roughly 3 usec target.
	 */
	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
		SYM_LSB(IBCCtrlA_0, MaxPktLen);
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */

	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is still
	 * in reset from above).  Writes new value of ibcctrl_a as last step.
	 */
	qib_7322_mini_pcs_reset(ppd);

	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
							    krp_ibcctrl_b);
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_SPEED_DDR |
				IBA7322_IBC_SPEED_SDR |
				IBA7322_IBC_WIDTH_AUTONEG |
				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
				(lse << IBA7322_IBC_SPEED_LSB) |
				IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
		else
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
				IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_IBTA_1_2_MASK :
				(lse == QIB_IB_DDR) ?
					IBA7322_IBC_SPEED_DDR :
					IBA7322_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7322_IBC_WIDTH_4X_ONLY :
				IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
					   IBA7322_IBC_HRTBT_MASK);
	}
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);

	serdes_7322_init(ppd);

	guid = be64_to_cpu(ppd->guid);
	if (!guid) {
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	}

	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

	/* Enable port */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	set_vls(ppd);

	/* initially come up DISABLED, without sending anything. */
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear the linkinit cmds */
	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);

	/* be paranoid against later code motion, etc. */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Also enable IBSTATUSCHG interrupt. */
	val = qib_read_kreg_port(ppd, krp_errmask);
	qib_write_kreg_port(ppd, krp_errmask,
			    val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
	return ret;
}

/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	unsigned long flags;

	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);
	/*
	 * Despite the name, actually disables IBC as well. Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
	 * along with the PCS being reset.
	 */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_7322_mini_pcs_reset(ppd);

	/*
	 * Update the adjusted counters so the adjustment persists
	 * across driver reload.
	 */
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->ibsymsnap;
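			/* the subtraction above amounts to val = ibsymsnap,
			 * i.e. rolling the counter back to the snapshot
			 * taken when the delta tracking began */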
			val -= ppd->cpspec->ibsymdelta;
			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
		}
		if (ppd->cpspec->iblnkdowndelta) {
			val = read_7322_creg32_port(ppd, crp_iblinkdown);
			val += ppd->cpspec->iblnkdowndelta;
			write_7322_creg_port(ppd, crp_iblinkdown, val);
		}
		/*
		 * No need to save ibmalfdelta since IB perfcounters
		 * are cleared on driver reload.
		 */

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
}

/**
 * qib_setup_7322_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs if on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
 *
 * Note:  We try to match the Mellanox HCA LED behavior as best
 * we can.  Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate.  That's
 * visible overhead, so not something we will do.
 */
static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
{
	struct qib_devdata *dd = ppd->dd;
	u64 extctl, ledblink = 0, val;
	unsigned long flags;
	int yel, grn;

	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
	if (dd->diag_client)
		return;
	/* Allow override of LED display for, e.g., locating a system in a rack */
	if (ppd->led_override) {
		grn = (ppd->led_override & QIB_LED_PHYS);
		yel = (ppd->led_override & QIB_LED_LOG);
	} else if (on) {
		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		grn = qib_7322_phys_portstate(val) ==
			IB_PHYSPORTSTATE_LINKUP;
		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
	} else {
		grn = 0;
		yel = 0;
	}

	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
	if (grn) {
		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
		/*
		 * Counts are in chip clock (4ns) periods.
		 * This is roughly 1/15 sec (66.6 ms) on,
		 * 3/16 sec (187.5 ms) off, with packets rcvd.
		 */
		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
	}
	if (yel)
		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);

	if (ledblink) /* blink the LED on packet receive */
		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
}

#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
{
	switch (event) {
	case DCA_PROVIDER_ADD:
		if (dd->flags & QIB_DCA_ENABLED)
			break;
		if (!dca_add_requester(&dd->pcidev->dev)) {
			qib_devinfo(dd->pcidev, "DCA enabled\n");
			dd->flags |= QIB_DCA_ENABLED;
			qib_setup_dca(dd);
		}
		break;
	case DCA_PROVIDER_REMOVE:
		if (dd->flags & QIB_DCA_ENABLED) {
			dca_remove_requester(&dd->pcidev->dev);
			dd->flags &= ~QIB_DCA_ENABLED;
			dd->cspec->dca_ctrl = 0;
			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
				dd->cspec->dca_ctrl);
		}
		break;
	}
	return 0;
}

static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_chip_specific *cspec = dd->cspec;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
		const struct dca_reg_map *rmp;

		cspec->rhdr_cpu[rcd->ctxt] = cpu;
		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
		qib_devinfo(dd->pcidev,
			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		qib_write_kreg(dd, rmp->regno,
			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}

static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_chip_specific *cspec = dd->cspec;
	unsigned pidx = ppd->port - 1;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	if (cspec->sdma_cpu[pidx] != cpu) {
		cspec->sdma_cpu[pidx] = cpu;
		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
		cspec->dca_rcvhdr_ctrl[4] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
				(ppd->hw_pidx ?
					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
		qib_devinfo(dd->pcidev,
			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[4]);
		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
			       cspec->dca_rcvhdr_ctrl[4]);
		cspec->dca_ctrl |= ppd->hw_pidx ?
			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}

static void qib_setup_dca(struct qib_devdata *dd)
{
	struct qib_chip_specific *cspec = dd->cspec;
	int i;

	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
		cspec->rhdr_cpu[i] = -1;
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		cspec->sdma_cpu[i] = -1;
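	/*
	 * Preset each RcvHdrq DCA transfer-count field below to 1; the CPU
	 * tag bits are filled in later by qib_update_rhdrq_dca().
	 * (Assumption: 1 is the minimal transfer count, not a CPU tag.)
	 */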
	cspec->dca_rcvhdr_ctrl[0] =
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[1] =
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[2] =
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[3] =
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[4] =
		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
			       cspec->dca_rcvhdr_ctrl[i]);
	for (i = 0; i < cspec->num_msix_entries; i++)
		setup_dca_notifier(dd, &cspec->msix_entries[i]);
}

static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
			     const cpumask_t *mask)
{
	struct qib_irq_notify *n =
		container_of(notify, struct qib_irq_notify, notify);
	int cpu = cpumask_first(mask);

	if (n->rcv) {
		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;

		qib_update_rhdrq_dca(rcd, cpu);
	} else {
		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;

		qib_update_sdma_dca(ppd, cpu);
	}
}

static void qib_irq_notifier_release(struct kref *ref)
{
	struct qib_irq_notify *n =
		container_of(ref, struct qib_irq_notify, notify.kref);
	struct qib_devdata *dd;

	if (n->rcv) {
		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;

		dd = rcd->dd;
	} else {
		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;

		dd = ppd->dd;
	}
	qib_devinfo(dd->pcidev,
		"release on HCA notify 0x%p n 0x%p\n", ref, n);
	kfree(n);
}
#endif

/*
 * Disable MSIx interrupt if enabled, call generic MSIx code
 * to cleanup, and clear pending MSIx interrupts.
 * Used for fallback to INTx, after reset, and when MSIx setup fails.
 */
static void qib_7322_nomsix(struct qib_devdata *dd)
{
	u64 intgranted;
	int n;

	dd->cspec->main_int_mask = ~0ULL;
	n = dd->cspec->num_msix_entries;
	if (n) {
		int i;

		dd->cspec->num_msix_entries = 0;
		for (i = 0; i < n; i++) {
#ifdef CONFIG_INFINIBAND_QIB_DCA
			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
#endif
			irq_set_affinity_hint(
				dd->cspec->msix_entries[i].msix.vector, NULL);
			free_cpumask_var(dd->cspec->msix_entries[i].mask);
			free_irq(dd->cspec->msix_entries[i].msix.vector,
				 dd->cspec->msix_entries[i].arg);
		}
		qib_nomsix(dd);
	}
	/* make sure no MSIx interrupts are left pending */
	intgranted = qib_read_kreg64(dd, kr_intgranted);
	if (intgranted)
		qib_write_kreg(dd, kr_intgranted, intgranted);
}

static void qib_7322_free_irq(struct qib_devdata *dd)
{
	if (dd->cspec->irq) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	qib_7322_nomsix(dd);
}

static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
	int i;

#ifdef CONFIG_INFINIBAND_QIB_DCA
	if (dd->flags & QIB_DCA_ENABLED) {
		dca_remove_requester(&dd->pcidev->dev);
		dd->flags &= ~QIB_DCA_ENABLED;
		dd->cspec->dca_ctrl = 0;
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
	}
#endif

	qib_7322_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->sendchkenable);
	kfree(dd->cspec->sendgrhchk);
	kfree(dd->cspec->sendibchk);
	kfree(dd->cspec->msix_entries);
	for (i = 0; i < dd->num_pports; i++) {
		unsigned long flags;
		u32 mask = QSFP_GPIO_MOD_PRS_N |
			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);

		kfree(dd->pport[i].cpspec->portcntrs);
		if (dd->flags & QIB_HAS_QSFP) {
			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
			dd->cspec->gpio_mask &= ~mask;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
		}
		if (dd->pport[i].ibport_data.smi_ah)
			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
	}
}

/* handle SDMA interrupts */
static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
{
	struct qib_pportdata *ppd0 = &dd->pport[0];
	struct qib_pportdata *ppd1 = &dd->pport[1];
	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));

	if (intr0)
		qib_sdma_intr(ppd0);
	if (intr1)
		qib_sdma_intr(ppd1);

	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
}

/*
 * Set or clear the Send buffer available interrupt enable bit.
 */
static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (needint)
		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
	else
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/*
 * Somehow got an interrupt with reserved bits set in interrupt status.
 * Print a message so we know it happened, then clear them.
 * Keep mainline interrupt handler cache-friendly.
 */
static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
{
	u64 kills;

	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd,
		"Clearing reserved interrupt(s) 0x%016llx\n",
		(unsigned long long) kills);
	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
}

/* keep mainline interrupt handler cache-friendly */
static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
{
	u32 gpiostatus;
	int handled = 0;
	int pidx;

	/*
	 * Boards for this chip currently don't use GPIO interrupts,
	 * so clear by writing GPIOstatus to GPIOclear, and complain
	 * to developer.  To avoid endless repeats, clear
	 * the bits in the mask, since there is some kind of
	 * programming error or chip problem.
	 */
	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
	/*
	 * In theory, writing GPIOstatus to GPIOclear could
	 * have a bad side-effect on some diagnostic that wanted
	 * to poll for a status-change, but the various shadows
	 * make that problematic at best. Diags will just suppress
	 * all GPIO interrupts during such tests.
	 */
	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
	/*
	 * Check for QSFP MOD_PRS changes
	 * only works for single port if IB1 != pidx1
	 */
	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
	     ++pidx) {
		struct qib_pportdata *ppd;
		struct qib_qsfp_data *qd;
		u32 mask;

		if (!dd->pport[pidx].link_speed_supported)
			continue;
		mask = QSFP_GPIO_MOD_PRS_N;
		ppd = dd->pport + pidx;
		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
		if (gpiostatus & dd->cspec->gpio_mask & mask) {
			u64 pins;

			qd = &ppd->cpspec->qsfp_data;
			gpiostatus &= ~mask;
			pins = qib_read_kreg64(dd, kr_extstatus);
			pins >>= SYM_LSB(EXTStatus, GPIOIn);
			if (!(pins & mask)) {
				++handled;
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
		}
	}

	if (gpiostatus && !handled) {
		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
		u32 gpio_irq = mask & gpiostatus;

		/*
		 * Clear any troublemakers, and update chip from shadow
		 */
		dd->cspec->gpio_mask &= ~gpio_irq;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	}
}

/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
{
	if (istat & ~QIB_I_BITSEXTANT)
		unknown_7322_ibits(dd, istat);
	if (istat & QIB_I_GPIO)
		unknown_7322_gpio_intr(dd);
	if (istat & QIB_I_C_ERROR) {
		qib_write_kreg(dd, kr_errmask, 0ULL);
		tasklet_schedule(&dd->error_tasklet);
	}
	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
		handle_7322_p_errors(dd->rcd[0]->ppd);
	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
		handle_7322_p_errors(dd->rcd[1]->ppd);
}

/*
 * Dynamically adjust the rcv int timeout for a context based on incoming
 * packet rate.
 */
static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
{
	struct qib_devdata *dd = rcd->dd;
	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];

	/*
	 * Dynamically adjust idle timeout on chip
	 * based on number of packets processed.
	 */
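	/*
	 * For example, if rcv_int_count were 16: fewer than 16 packets in
	 * this interrupt halves the timeout (floor of 2), 16 or more
	 * doubles it (capped at rcv_int_timeout); otherwise (already at a
	 * bound) the timeout and the chip register are left unchanged.
	 */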
	if (npkts < rcv_int_count && timeout > 2)
		timeout >>= 1;
	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
		timeout = min(timeout << 1, rcv_int_timeout);
	else
		return;

	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
}

/*
 * This is the main interrupt handler.
 * It will normally only be used for low frequency interrupts but may
 * have to handle all interrupts if INTx is enabled or fewer than normal
 * MSIx interrupts were allocated.
 * This routine should ignore the interrupt bits for any of the
 * dedicated MSIx handlers.
 */
static irqreturn_t qib_7322intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u64 istat;
	u64 ctxtrbits;
	u64 rmask;
	unsigned i;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg64(dd, kr_intstatus);

	if (unlikely(istat == ~0ULL)) {
		qib_bad_intrstatus(dd);
		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	istat &= dd->cspec->main_int_mask;
	if (unlikely(!istat)) {
		/* already handled, or shared and not us */
		ret = IRQ_NONE;
		goto bail;
	}

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* handle "errors" of various kinds first, device ahead of port */
	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
			      INT_MASK_P(Err, 1))))
		unlikely_7322_intr(dd, istat);
	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary.  The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
	if (ctxtrbits) {
		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
			(1ULL << QIB_I_RCVURG_LSB);
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				if (dd->rcd[i])
					qib_kreceive(dd->rcd[i], NULL, &npkts);
			}
			rmask <<= 1;
		}
		if (ctxtrbits) {
			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
				(ctxtrbits >> QIB_I_RCVURG_LSB);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
		sdma_7322_intr(dd, istat);

	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}

/*
 * Dedicated receive packet available interrupt handler.
 */
static irqreturn_t qib_7322pintr(int irq, void *data)
{
	struct qib_ctxtdata *rcd = data;
	struct qib_devdata *dd = rcd->dd;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);

	qib_kreceive(rcd, NULL, &npkts);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send buffer available interrupt handler.
 */
static irqreturn_t qib_7322bufavail(int irq, void *data)
{
	struct qib_devdata *dd = data;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);

	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
	if (dd->flags & QIB_INITTED)
		qib_ib_piobufavail(dd);
	else
		qib_wantpiobuf_7322_intr(dd, 0);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA interrupt handler.
 */
static irqreturn_t sdma_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA idle interrupt handler.
 */
static irqreturn_t sdma_idle_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA progress interrupt handler.
 */
static irqreturn_t sdma_progress_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaProgress, 1) :
		       INT_MASK_P(SDmaProgress, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA cleanup interrupt handler.
 */
static irqreturn_t sdma_cleanup_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_PM(SDmaCleanupDone, 1) :
		       INT_MASK_PM(SDmaCleanupDone, 0));
	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);

	return IRQ_HANDLED;
}

#ifdef CONFIG_INFINIBAND_QIB_DCA

static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
{
	if (!m->dca)
		return;
	qib_devinfo(dd->pcidev,
		"Disabling notifier on HCA %d irq %d\n",
		dd->unit,
		m->msix.vector);
	irq_set_affinity_notifier(
		m->msix.vector,
		NULL);
	m->notifier = NULL;
}

static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
{
	struct qib_irq_notify *n;

	if (!m->dca)
		return;
	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (n) {
		int ret;

		m->notifier = n;
		n->notify.irq = m->msix.vector;
		n->notify.notify = qib_irq_notifier_notify;
		n->notify.release = qib_irq_notifier_release;
		n->arg = m->arg;
		n->rcv = m->rcv;
		qib_devinfo(dd->pcidev,
			"set notifier irq %d rcv %d notify %p\n",
			n->notify.irq, n->rcv, &n->notify);
		ret = irq_set_affinity_notifier(
				n->notify.irq,
				&n->notify);
		if (ret) {
			m->notifier = NULL;
			kfree(n);
		}
	}
}

#endif

/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 * If we are using MSIx interrupts, we may fall back to
 * INTx later, if the interrupt handler doesn't get called
 * within 1/2 second (see verify_interrupt()).
 */
static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
{
	int ret, i, msixnum;
	u64 redirect[6];
	u64 mask;
	const struct cpumask *local_mask;
	int firstcpu, secondcpu = 0, currrcvcpu = 0;

	if (!dd->num_pports)
		return;

	if (clearpend) {
		/*
		 * if not switching interrupt types, be sure interrupts are
		 * disabled, and then clear anything pending at this point,
		 * because we are starting clean.
		 */
		qib_7322_set_intr_state(dd, 0);

		/* clear the reset error, init error/hwerror mask */
		qib_7322_init_hwerrors(dd);

		/* clear any interrupt bits that might be set */
		qib_write_kreg(dd, kr_intclear, ~0ULL);

		/* make sure no pending MSIx intr, and clear diag reg */
		qib_write_kreg(dd, kr_intgranted, ~0ULL);
		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
	}

	if (!dd->cspec->num_msix_entries) {
		/* Try to get INTx interrupt */
try_intx:
		if (!dd->pcidev->irq) {
			qib_dev_err(dd,
				"irq is 0, BIOS error?  Interrupts won't work\n");
			goto bail;
		}
		ret = request_irq(dd->pcidev->irq, qib_7322intr,
				  IRQF_SHARED, QIB_DRV_NAME, dd);
		if (ret) {
			qib_dev_err(dd,
				"Couldn't setup INTx interrupt (irq=%d): %d\n",
				dd->pcidev->irq, ret);
			goto bail;
		}
		dd->cspec->irq = dd->pcidev->irq;
		dd->cspec->main_int_mask = ~0ULL;
		goto bail;
	}

	/* Try to get MSIx interrupts */
	memset(redirect, 0, sizeof(redirect));
	mask = ~0ULL;
	msixnum = 0;
	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	firstcpu = cpumask_first(local_mask);
	if (firstcpu >= nr_cpu_ids ||
	    cpumask_weight(local_mask) == num_online_cpus()) {
		local_mask = topology_core_cpumask(0);
		firstcpu = cpumask_first(local_mask);
	}
	if (firstcpu < nr_cpu_ids) {
		secondcpu = cpumask_next(firstcpu, local_mask);
		if (secondcpu >= nr_cpu_ids)
			secondcpu = firstcpu;
		currrcvcpu = secondcpu;
	}
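	/*
	 * Affinity plan for the loop below: general-purpose vectors are
	 * hinted at firstcpu (nearest the device), while the kernel-context
	 * receive vectors are spread across the remaining local CPUs,
	 * starting at secondcpu.
	 */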
	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
		irq_handler_t handler;
		void *arg;
		u64 val;
		int lsb, reg, sh;
#ifdef CONFIG_INFINIBAND_QIB_DCA
		int dca = 0;
#endif

		dd->cspec->msix_entries[msixnum].
			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
			= '\0';
		if (i < ARRAY_SIZE(irq_table)) {
			if (irq_table[i].port) {
				/* skip if for a non-configured port */
				if (irq_table[i].port > dd->num_pports)
					continue;
				arg = dd->pport + irq_table[i].port - 1;
			} else
				arg = dd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
			dca = irq_table[i].dca;
#endif
			lsb = irq_table[i].lsb;
			handler = irq_table[i].handler;
			snprintf(dd->cspec->msix_entries[msixnum].name,
				sizeof(dd->cspec->msix_entries[msixnum].name)
				 - 1,
				QIB_DRV_NAME "%d%s", dd->unit,
				irq_table[i].name);
		} else {
			unsigned ctxt;

			ctxt = i - ARRAY_SIZE(irq_table);
			/* per krcvq context receive interrupt */
			arg = dd->rcd[ctxt];
			if (!arg)
				continue;
			if (qib_krcvq01_no_msi && ctxt < 2)
				continue;
#ifdef CONFIG_INFINIBAND_QIB_DCA
			dca = 1;
#endif
			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
			handler = qib_7322pintr;
			snprintf(dd->cspec->msix_entries[msixnum].name,
				sizeof(dd->cspec->msix_entries[msixnum].name)
				 - 1,
				QIB_DRV_NAME "%d (kctx)", dd->unit);
		}
		ret = request_irq(
			dd->cspec->msix_entries[msixnum].msix.vector,
			handler, 0, dd->cspec->msix_entries[msixnum].name,
			arg);
		if (ret) {
			/*
			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to setup here.
			 */
			qib_dev_err(dd,
				"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
				msixnum,
				dd->cspec->msix_entries[msixnum].msix.vector,
				ret);
			qib_7322_nomsix(dd);
			goto try_intx;
		}
		dd->cspec->msix_entries[msixnum].arg = arg;
#ifdef CONFIG_INFINIBAND_QIB_DCA
		dd->cspec->msix_entries[msixnum].dca = dca;
		dd->cspec->msix_entries[msixnum].rcv =
			handler == qib_7322pintr;
#endif
		if (lsb >= 0) {
			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
				SYM_LSB(IntRedirect0, vec1);
			mask &= ~(1ULL << lsb);
			redirect[reg] |= ((u64) msixnum) << sh;
		}
		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
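		/*
		 * The value read above is not used; the read itself appears
		 * to be what matters (assumption: it syncs the MSIx table
		 * entry before the affinity hint below takes effect).
		 */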
		if (firstcpu < nr_cpu_ids &&
			zalloc_cpumask_var(
				&dd->cspec->msix_entries[msixnum].mask,
				GFP_KERNEL)) {
			if (handler == qib_7322pintr) {
				cpumask_set_cpu(currrcvcpu,
					dd->cspec->msix_entries[msixnum].mask);
				currrcvcpu = cpumask_next(currrcvcpu,
					local_mask);
				if (currrcvcpu >= nr_cpu_ids)
					currrcvcpu = secondcpu;
			} else {
				cpumask_set_cpu(firstcpu,
					dd->cspec->msix_entries[msixnum].mask);
			}
			irq_set_affinity_hint(
				dd->cspec->msix_entries[msixnum].msix.vector,
				dd->cspec->msix_entries[msixnum].mask);
		}
		msixnum++;
	}
	/* Initialize the vector mapping */
	for (i = 0; i < ARRAY_SIZE(redirect); i++)
		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
	dd->cspec->main_int_mask = mask;
	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
		(unsigned long)dd);
bail:;
}

/**
 * qib_7322_boardname - fill in the board name and note features
 * @dd: the qlogic_ib device
 *
 * info will be based on the board revision register
 */
static unsigned qib_7322_boardname(struct qib_devdata *dd)
{
	/* Will need enumeration of board-types here */
	char *n;
	u32 boardid, namelen;
	unsigned features = DUAL_PORT_CAP;

	boardid = SYM_FIELD(dd->revision, Revision, BoardID);

	switch (boardid) {
	case 0:
		n = "InfiniPath_QLE7342_Emulation";
		break;
	case 1:
		n = "InfiniPath_QLE7340";
		dd->flags |= QIB_HAS_QSFP;
		features = PORT_SPD_CAP;
		break;
	case 2:
		n = "InfiniPath_QLE7342";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 3:
		n = "InfiniPath_QMI7342";
		break;
	case 4:
		n = "InfiniPath_Unsupported7342";
		qib_dev_err(dd, "Unsupported version of QMH7342\n");
		features = 0;
		break;
	case BOARD_QMH7342:
		n = "InfiniPath_QMH7342";
		features = 0x24;
		break;
	case BOARD_QME7342:
		n = "InfiniPath_QME7342";
		break;
	case 8:
		n = "InfiniPath_QME7362";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 15:
		n = "InfiniPath_QLE7342_TEST";
		dd->flags |= QIB_HAS_QSFP;
		break;
	default:
		n = "InfiniPath_QLE73xy_UNKNOWN";
		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
		break;
	}
	dd->board_atten = 1; /* index into txdds_Xdr */

	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));

	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
		qib_devinfo(dd->pcidev,
			"IB%u: Forced to single port mode by module parameter\n",
			dd->unit);
		features &= PORT_SPD_CAP;
	}

	return features;
}

/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context.
 */
static int qib_do_7322_reset(struct qib_devdata *dd)
{
	u64 val;
	u64 *msix_vecsave;
	int i, msix_entries, ret = 1;
	u16 cmdval;
	u8 int_line, clinesz;
	unsigned long flags;

	/* Use dev_err so it shows up in logs, etc. */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);

	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);

	msix_entries = dd->cspec->num_msix_entries;

	/* no interrupts till re-initted */
	qib_7322_set_intr_state(dd, 0);

	if (msix_entries) {
		qib_7322_nomsix(dd);
		/* can be up to 512 bytes, too big for stack */
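		/* the save area holds two u64s (address, data) per vector */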
		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
				       sizeof(u64), GFP_KERNEL);
		if (!msix_vecsave)
			qib_dev_err(dd, "No mem to save MSIx data\n");
	} else
		msix_vecsave = NULL;

	/*
	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
	 * info that is set up by the BIOS, so we have to save and restore
	 * it ourselves.  There is some risk something could change it,
	 * after we save it, but since we have disabled the MSIx, it
	 * shouldn't be touched...
	 */
	for (i = 0; i < msix_entries; i++) {
		u64 vecaddr, vecdata;

		vecaddr = qib_read_kreg64(dd, 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)));
		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)));
		if (msix_vecsave) {
			msix_vecsave[2 * i] = vecaddr;
			/* save it without the masked bit set */
			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
		}
	}

	dd->pport->cpspec->ibdeltainprog = 0;
	dd->pport->cpspec->ibsymdelta = 0;
	dd->pport->cpspec->iblnkerrdelta = 0;
	dd->pport->cpspec->ibmalfdelta = 0;
	dd->int_counter = 0; /* so we check interrupts work again */

	/*
	 * Keep chip from being accessed until we are ready.  Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
	 */
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
	dd->flags |= QIB_DOING_RESET;
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);

	for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST, etc. to complete; longer on each retry.
		 * We sometimes get machine checks from bus timeout if no
		 * response, so for now, make it *really* long.
		 */
		msleep(1000 + (1 + i) * 3000);

		qib_pcie_reenable(dd, cmdval, int_line, clinesz);

		/*
		 * Use readq directly, so we don't need to mark it as PRESENT
		 * until we get a successful indication that all is well.
		 */
		val = readq(&dd->kregbase[kr_revision]);
		if (val == dd->revision)
			break;
		if (i == 5) {
			qib_dev_err(dd,
				"Failed to initialize after reset, unusable\n");
			ret = 0;
			goto bail;
		}
	}

	dd->flags |= QIB_PRESENT; /* it's back */

	if (msix_entries) {
		/* restore the MSIx vector address and data if saved above */
		for (i = 0; i < msix_entries; i++) {
			dd->cspec->msix_entries[i].msix.entry = i;
			if (!msix_vecsave || !msix_vecsave[2 * i])
				continue;
			qib_write_kreg(dd, 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
				msix_vecsave[2 * i]);
			qib_write_kreg(dd, 1 + 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
				msix_vecsave[1 + 2 * i]);
		}
	}

	/* initialize the remaining registers. */
	for (i = 0; i < dd->num_pports; ++i)
		write_7322_init_portregs(&dd->pport[i]);
	write_7322_initregs(dd);

	if (qib_pcie_params(dd, dd->lbus_width,
			    &dd->cspec->num_msix_entries,
			    dd->cspec->msix_entries))
		qib_dev_err(dd,
			"Reset failed to setup PCIe or interrupts; continuing anyway\n");

	qib_setup_7322_interrupt(dd, 1);

	for (i = 0; i < dd->num_pports; ++i) {
		struct qib_pportdata *ppd = &dd->pport[i];

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

bail:
	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
	kfree(msix_vecsave);
	return ret;
}

/**
 * qib_7322_put_tid - write a TID to the chip
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: 0 for eager, 1 for expected
 * @pa: physical address of in memory buffer; tidinvalid if freeing
 */
static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	if (!(dd->flags & QIB_PRESENT))
		return;
	if (pa != dd->tidinvalid) {
		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;

		/* paranoia checks */
		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
				    pa);
			return;
		}
		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
			qib_dev_err(dd,
				"Physical page address 0x%lx larger than supported\n",
				pa);
			return;
		}

		if (type == RCVHQ_RCV_TYPE_EAGER)
			chippa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			chippa |= IBA7322_TID_SZ_4K;
		pa = chippa;
	}
	writeq(pa, tidptr);
	mmiowb();
}

/**
 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @rcd: the ctxt
 *
 * clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close().
 */
static void qib_7322_clear_tids(struct qib_devdata *dd,
				struct qib_ctxtdata *rcd)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	u32 ctxt;
	int i;

	if (!dd->kregbase || !rcd)
		return;

	ctxt = rcd->ctxt;

	tidinv = dd->tidinvalid;
	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->rcvtidcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				 tidinv);

	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));

	for (i = 0; i < rcd->rcvegrcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				 tidinv);
}

/**
 * qib_7322_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void qib_7322_tidtemplate(struct qib_devdata *dd)
{
	/*
	 * For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets.  We may want a module parameter to
	 * specify 2KB or 4KB and/or make it per port instead of per device
	 * for those who want to reduce memory footprint.  Note that the
	 * rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
	if (dd->rcvegrbufsize == 2048)
		dd->tidtemplate = IBA7322_TID_SZ_2K;
	else if (dd->rcvegrbufsize == 4096)
		dd->tidtemplate = IBA7322_TID_SZ_4K;
	dd->tidinvalid = 0;
}

/**
 * qib_7322_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
				  struct qib_base_info *kinfo)
{
	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;

	if (rcd->dd->cspec->r1)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;

	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;

	return 0;
}
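
/*
 * rhf_addr points at the receive header flags word inside a header
 * queue entry; backing up by the per-device RHF offset reaches the
 * start of the entry, and adding the offset (in 32-bit words) that
 * the RHF itself encodes locates the packet's message header.
 */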
static struct qib_message_header *
qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = qib_hdrget_offset(rhf_addr);

	return (struct qib_message_header *)
		(rhf_addr - dd->rhf_offset + offset);
}

/*
 * Configure number of contexts.
 */
static void qib_7322_config_ctxts(struct qib_devdata *dd)
{
	unsigned long flags;
	u32 nchipctxts;

	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
	dd->cspec->numctxts = nchipctxts;
	if (qib_n_krcv_queues > 1 && dd->num_pports) {
		dd->first_user_ctxt = NUM_IB_PORTS +
			(qib_n_krcv_queues - 1) * dd->num_pports;
		if (dd->first_user_ctxt > nchipctxts)
			dd->first_user_ctxt = nchipctxts;
		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
	} else {
		dd->first_user_ctxt = NUM_IB_PORTS;
		dd->n_krcv_queues = 1;
	}
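
	/*
	 * Worked example (illustrative): with qib_cfgctxts unset, a
	 * dual-port board with 6 online CPUs wants first_user_ctxt + 6
	 * contexts; the chip is then put in the smallest of its 6-, 10-
	 * or 18-context modes that fits, which in turn determines the
	 * eager-TID resources per context (see the comment below).
	 */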
	if (!qib_cfgctxts) {
		int nctxts = dd->first_user_ctxt + num_online_cpus();

		if (nctxts <= 6)
			dd->ctxtcnt = 6;
		else if (nctxts <= 10)
			dd->ctxtcnt = 10;
		else if (nctxts <= nchipctxts)
			dd->ctxtcnt = nchipctxts;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->ctxtcnt = dd->num_pports;
	else if (qib_cfgctxts <= nchipctxts)
		dd->ctxtcnt = qib_cfgctxts;
	if (!dd->ctxtcnt) /* none of the above, set to max */
		dd->ctxtcnt = nchipctxts;

	/*
	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
	 * Lock to be paranoid about later motion, etc.
	 */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	if (dd->ctxtcnt > 10)
		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
	else if (dd->ctxtcnt > 6)
		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
	/* else configure for default 6 receive ctxts */

	/* The XRC opcode is 5. */
	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);

	/*
	 * RcvCtrl *must* be written here so that the
	 * chip understands how to change rcvegrcnt below.
	 */
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* kr_rcvegrcnt changes based on the number of contexts enabled */
	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
	if (qib_rcvhdrcnt)
		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
	else
		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
				dd->num_pports > 1 ? 1024U : 2048U);
}

static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
{
	int lsb, ret = 0;
	u64 maskr; /* right-justified mask */

	switch (which) {
	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
		ret = ppd->link_width_enabled;
		goto done;

	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
		ret = ppd->link_width_active;
		goto done;

	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
		ret = ppd->link_speed_enabled;
		goto done;

	case QIB_IB_CFG_SPD: /* Get current Link spd */
		ret = ppd->link_speed_active;
		goto done;

	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;

	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;

	case QIB_IB_CFG_LINKLATENCY:
		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
		goto done;

	case QIB_IB_CFG_OP_VLS:
		ret = ppd->vls_operational;
		goto done;

	case QIB_IB_CFG_VL_HIGH_CAP:
		ret = 16;
		goto done;

	case QIB_IB_CFG_VL_LOW_CAP:
		ret = 16;
		goto done;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				OverrunThreshold);
		goto done;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				PhyerrThreshold);
		goto done;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		ret = (ppd->cpspec->ibcctrl_a &
		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
		goto done;

	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PMA_TICKS:
		/*
		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
		 */
		if (ppd->link_speed_active == QIB_IB_QDR)
			ret = 3;
		else if (ppd->link_speed_active == QIB_IB_DDR)
			ret = 1;
		else
			ret = 0;
		goto done;

	default:
		ret = -EINVAL;
		goto done;
	}
	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
done:
	return ret;
}

/*
 * Below again cribbed liberally from older version. Do not lean
 * heavily on it.
 */
#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))

static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
{
	struct qib_devdata *dd = ppd->dd;
	u64 maskr; /* right-justified mask */
	int lsb, ret = 0;
	u16 lcmd, licmd;
	unsigned long flags;

	switch (which) {
	case QIB_IB_CFG_LIDLMC:
		/*
		 * Set LID and LMC. Combined to avoid possible hazard
		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
		 */
		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
		maskr = IBA7322_IBC_DLIDLMC_MASK;
		/*
		 * For header-checking, the SLID in the packet will
		 * be masked with SendIBSLMCMask, and compared
		 * with SendIBSLIDAssignMask. Make sure we do not
		 * set any bits not covered by the mask, or we get
		 * false-positives.
		 */
		qib_write_kreg_port(ppd, krp_sendslid,
				    val & (val >> 16) & SendIBSLIDAssignMask);
		qib_write_kreg_port(ppd, krp_sendslidmask,
				    (val >> 16) & SendIBSLMCMask);
		break;

	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val;
		/* convert IB value to chip register value */
		if (val == IB_WIDTH_1X)
			val = 0;
		else if (val == IB_WIDTH_4X)
			val = 1;
		else
			val = 3;
		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
		break;

	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
		/*
		 * As with width, only write the actual register if the
		 * link is currently down, otherwise takes effect on next
		 * link change.  Since setting is being explicitly requested
		 * (via MAD or sysfs), clear autoneg failure status if speed
		 * autoneg is enabled.
		 */
		ppd->link_speed_enabled = val;
		val <<= IBA7322_IBC_SPEED_LSB;
		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
			IBA7322_IBC_MAX_SPEED_MASK;
		if (val & (val - 1)) {
			/* Multiple speeds enabled */
			val |= IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		} else if (val & IBA7322_IBC_SPEED_QDR)
			val |= IBA7322_IBC_IBTA_1_2_MASK;
		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
		break;

	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;

	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  OverrunThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  PhyerrThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;
	case QIB_IB_CFG_PKEYS: /* update pkeys */
		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
		goto bail;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		if (val == IB_LINKINITCMD_POLL)
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		else /* SLEEP */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;

	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
		/*
		 * Update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 * Set even if it's unchanged, print debug message only
		 * on changes.
		 */
		val = (ppd->ibmaxlen >> 2) + 1;
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
		ppd->cpspec->ibcctrl_a |= (u64)val <<
			SYM_LSB(IBCCtrlA_0, MaxPktLen);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;

	case QIB_IB_CFG_LSTATE: /* set the IB link state */
		switch (val & 0xffff0000) {
		case IB_LINKCMD_DOWN:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
			ppd->cpspec->ibmalfusesnap = 1;
			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
				crp_errlink);
			if (!ppd->cpspec->ibdeltainprog &&
			    qib_compat_ddr_negotiate) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap =
					read_7322_creg32_port(ppd,
							      crp_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap =
					read_7322_creg32_port(ppd,
							      crp_iblinkerrrecov);
			}
			break;

		case IB_LINKCMD_ARMED:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
			if (ppd->cpspec->ibmalfusesnap) {
				ppd->cpspec->ibmalfusesnap = 0;
				ppd->cpspec->ibmalfdelta +=
					read_7322_creg32_port(ppd,
							      crp_errlink) -
					ppd->cpspec->ibmalfsnap;
			}
			break;

		case IB_LINKCMD_ACTIVE:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
			goto bail;
		}
		switch (val & 0xffff) {
		case IB_LINKINITCMD_NOP:
			licmd = 0;
			break;

		case IB_LINKINITCMD_POLL:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
			break;

		case IB_LINKINITCMD_SLEEP:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
			break;

		case IB_LINKINITCMD_DISABLE:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
			ppd->cpspec->chase_end = 0;
			/*
			 * stop state chase counter and timer, if running.
			 * wait for pending timer, but don't clear .data (ppd)!
			 */
			if (ppd->cpspec->chase_timer.expires) {
				del_timer_sync(&ppd->cpspec->chase_timer);
				ppd->cpspec->chase_timer.expires = 0;
			}
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
				    val & 0xffff);
			goto bail;
		}
		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
		goto bail;

	case QIB_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			set_vls(ppd);
		}
		goto bail;

	case QIB_IB_CFG_VL_HIGH_LIMIT:
		qib_write_kreg_port(ppd, krp_highprio_limit, val);
		goto bail;

	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
		if (val > 3) {
			ret = -EINVAL;
			goto bail;
		}
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PORT:
		/* val is the port number of the switch we are connected to. */
		if (ppd->dd->cspec->r1) {
			cancel_delayed_work(&ppd->cpspec->ipg_work);
			ppd->cpspec->ipg_tries = 0;
		}
		goto bail;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(dd, kr_scratch, 0);
bail:
	return ret;
}

static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
{
	int ret = 0;
	u64 val, ctrlb;

	/* only IBC loopback, may add serdes and xgxs loopbacks later */
	if (!strncmp(what, "ibc", 3)) {
		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
						   Loopback);
		val = 0; /* disable heart beat, so link will come up */
		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
			    ppd->dd->unit, ppd->port);
	} else if (!strncmp(what, "off", 3)) {
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
						    Loopback);
		/* enable heart beat again */
		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
		qib_devinfo(ppd->dd->pcidev,
			"Disabling IB%u:%u IBC loopback (normal)\n",
			ppd->dd->unit, ppd->port);
	} else
		ret = -EINVAL;
	if (!ret) {
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
					     << IBA7322_IBC_HRTBT_LSB);
		ppd->cpspec->ibcctrl_b = ctrlb | val;
		qib_write_kreg_port(ppd, krp_ibcctrl_b,
				    ppd->cpspec->ibcctrl_b);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
	}
	return ret;
}
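
/*
 * The 7322 VL arbitration tables each hold 16 (VL, weight) pairs per
 * port, one pair per register starting at "regno"; the helpers below
 * unpack and pack those pairs for the high- and low-priority tables.
 */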
static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u32 val = qib_read_kreg_port(ppd, regno);

		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
			SYM_RMASK(LowPriority0_0, VirtualLane);
		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
			SYM_RMASK(LowPriority0_0, Weight);
	}
}

static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u64 val;

		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
			SYM_LSB(LowPriority0_0, VirtualLane)) |
		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
			SYM_LSB(LowPriority0_0, Weight));
		qib_write_kreg_port(ppd, regno, val);
	}
	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
		struct qib_devdata *dd = ppd->dd;
		unsigned long flags;

		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	}
}

static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	switch (which) {
	case QIB_IB_TBL_VL_HIGH_ARB:
		get_vl_weights(ppd, krp_highprio_0, t);
		break;

	case QIB_IB_TBL_VL_LOW_ARB:
		get_vl_weights(ppd, krp_lowprio_0, t);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	switch (which) {
	case QIB_IB_TBL_VL_HIGH_ARB:
		set_vl_weights(ppd, krp_highprio_0, t);
		break;

	case QIB_IB_TBL_VL_LOW_ARB:
		set_vl_weights(ppd, krp_lowprio_0, t);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	/*
	 * Need to write timeout register before updating rcvhdrhead to ensure
	 * that the timer is enabled on reception of a packet.
	 */
	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
	mmiowb();
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	mmiowb();
}

static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
{
	u32 head, tail;

	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	if (rcd->rcvhdrtail_kvaddr)
		tail = qib_get_rcvhdrtail(rcd);
	else
		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
	return head == tail;
}

#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
	QIB_RCVCTRL_CTXT_DIS | \
	QIB_RCVCTRL_TIDFLOW_ENB | \
	QIB_RCVCTRL_TIDFLOW_DIS | \
	QIB_RCVCTRL_TAILUPD_ENB | \
	QIB_RCVCTRL_TAILUPD_DIS | \
	QIB_RCVCTRL_INTRAVAIL_ENB | \
	QIB_RCVCTRL_INTRAVAIL_DIS | \
	QIB_RCVCTRL_BP_ENB | \
	QIB_RCVCTRL_BP_DIS)

#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
	QIB_RCVCTRL_CTXT_DIS | \
	QIB_RCVCTRL_PKEY_DIS | \
	QIB_RCVCTRL_PKEY_ENB)

/*
 * Modify the RCVCTRL register in chip-specific way. This
 * is a function because bit positions and (future) register
 * location is chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	u64 mask, val;
	unsigned long flags;

	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);

	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
	if (op & QIB_RCVCTRL_PKEY_ENB)
		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
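	/* a negative ctxt means "apply to all contexts" */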
	if (ctxt < 0) {
		mask = (1ULL << dd->ctxtcnt) - 1;
		rcd = NULL;
	} else {
		mask = (1ULL << ctxt);
		rcd = dd->rcd[ctxt];
	}
	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
		ppd->p_rcvctrl |=
			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
		}
		/* Write these registers before the context is enabled. */
		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
				    rcd->rcvhdrqtailaddr_phys);
		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
				    rcd->rcvhdrq_phys);
		rcd->seq_cnt = 1;
	}
	if (op & QIB_RCVCTRL_CTXT_DIS)
		ppd->p_rcvctrl &=
			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
	if (op & QIB_RCVCTRL_BP_ENB)
		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
	if (op & QIB_RCVCTRL_BP_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
	/*
	 * Decide which registers to write depending on the ops enabled.
	 * Special case is "flush" (no bits set at all)
	 * which needs to write both.
	 */
	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	if (op == 0 || (op & RCVCTRL_PORT_MODS))
		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
		/*
		 * Init the context registers also; if we were
		 * disabled, tail and head should both be zero
		 * already from the enable, but since we don't
		 * know, we have to do it explicitly.
		 */
		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);

		/* be sure enabling write seen; hd/tl should be 0 */
		(void) qib_read_kreg32(dd, kr_scratch);
		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
		dd->rcd[ctxt]->head = val;
		/* If kctxt, interrupt on next receive. */
		if (ctxt < dd->first_user_ctxt)
			val |= dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
		   dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
		/* arm rcv interrupt */
		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS) {
		unsigned f;

		/* Now that the context is disabled, clear these registers. */
		if (ctxt >= 0) {
			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
				qib_write_ureg(dd, ur_rcvflowtable + f,
					       TIDFLOW_ERRBITS, ctxt);
		} else {
			unsigned i;

			for (i = 0; i < dd->cfgctxts; i++) {
				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
						    i, 0);
				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
					qib_write_ureg(dd, ur_rcvflowtable + f,
						       TIDFLOW_ERRBITS, i);
			}
		}
	}
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}

/*
 * Modify the SENDCTRL register in chip-specific way. This
 * is a function where there are multiple such registers with
 * slightly different layouts.
 * The chip doesn't allow back-to-back sendctrl writes, so write
 * the scratch register after writing sendctrl.
 *
 * Which register is written depends on the operation.
 * Most operate on the common register, while
 * SEND_ENB and SEND_DIS operate on the per-port ones.
 * SEND_ENB is included in common because it can change SPCL_TRIG
 */
#define SENDCTRL_COMMON_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_AVAIL_DIS | \
	QIB_SENDCTRL_AVAIL_ENB | \
	QIB_SENDCTRL_AVAIL_BLIP | \
	QIB_SENDCTRL_DISARM | \
	QIB_SENDCTRL_DISARM_ALL | \
	QIB_SENDCTRL_SEND_ENB)

#define SENDCTRL_PORT_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_SEND_ENB | \
	QIB_SENDCTRL_SEND_DIS | \
	QIB_SENDCTRL_FLUSH)

static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 tmp_dd_sendctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	/* First the dd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_CLEAR)
		dd->sendctrl = 0;
	if (op & QIB_SENDCTRL_AVAIL_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
		if (dd->flags & QIB_USE_SPCL_TRIG)
			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
	}

	/* Then the ppd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_SEND_DIS)
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
	else if (op & QIB_SENDCTRL_SEND_ENB)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);

	if (op & QIB_SENDCTRL_DISARM_ALL) {
		u32 i, last;

		tmp_dd_sendctrl = dd->sendctrl;
		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
		/*
		 * Disarm any buffers that are not yet launched,
		 * disabling updates until done.
		 */
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
		for (i = 0; i < last; i++) {
			qib_write_kreg(dd, kr_sendctrl,
				       tmp_dd_sendctrl |
				       SYM_MASK(SendCtrl, Disarm) | i);
			qib_write_kreg(dd, kr_scratch, 0);
		}
	}

	if (op & QIB_SENDCTRL_FLUSH) {
		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;

		/*
		 * Now drain all the fifos.  The Abort bit should never be
		 * needed, so for now, at least, we don't use it.
		 */
		tmp_ppd_sendctrl |=
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
			SYM_MASK(SendCtrl_0, TxeBypassIbc);
		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	tmp_dd_sendctrl = dd->sendctrl;

	if (op & QIB_SENDCTRL_DISARM)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
			 SYM_LSB(SendCtrl, DisarmSendBuf));
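	/*
	 * AVAIL_BLIP: momentarily drop SendBufAvailUpd in the value
	 * written below; the unmodified shadow (dd->sendctrl) is written
	 * back further down, re-enabling updates so the chip refreshes
	 * the DMA'd buffer-available state.
	 */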
	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);

	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	if (op & QIB_SENDCTRL_FLUSH) {
		u32 v;

		/*
		 * ensure writes have hit chip, then do a few
		 * more reads, to allow DMA of pioavail registers
		 * to occur, so in-memory copy is in sync with
		 * the chip.  Not always safe to sleep.
		 */
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		qib_read_kreg32(dd, kr_scratch);
	}
}

#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */

/**
 * qib_portcntr_7322 - read a per-port chip counter
 * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
 */
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
{
	struct qib_devdata *dd = ppd->dd;
	u64 ret = 0ULL;
	u16 creg;
	/* 0xffff for unimplemented or synthesized counters */
	static const u32 xlator[] = {
		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
		[QIBPORTCNTR_ERRLINK] = crp_errlink,
		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
		/*
		 * the next 3 aren't really counters, but were implemented
		 * as counters in older chips, so still get accessed as
		 * though they were counters from this code.
		 */
		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
		[QIBPORTCNTR_PSSTART] = krp_psstart,
		[QIBPORTCNTR_PSSTAT] = krp_psstat,
		/* pseudo-counter, summed for all ports */
		[QIBPORTCNTR_KHDROVFL] = 0xffff,
	};

	if (reg >= ARRAY_SIZE(xlator)) {
		qib_devinfo(ppd->dd->pcidev,
			    "Unimplemented portcounter %u\n", reg);
		goto done;
	}
	creg = xlator[reg] & _PORT_CNTR_IDXMASK;

	/* handle non-counters and special cases first */
	if (reg == QIBPORTCNTR_KHDROVFL) {
		int i;

		/* sum over all kernel contexts (skip if mini_init) */
		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
			struct qib_ctxtdata *rcd = dd->rcd[i];

			if (!rcd || rcd->ppd != ppd)
				continue;
			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
		}
		goto done;
	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
		/*
		 * Used as part of the synthesis of port_rcv_errors
		 * in the verbs code for IBTA counters.  Not needed for 7322,
		 * because all the errors are already counted by other cntrs.
		 */
		goto done;
	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
		/* were counters in older chips, now per-port kernel regs */
		ret = qib_read_kreg_port(ppd, creg);
		goto done;
	}

	/*
	 * Only fast increment counters are 64 bits; use 32 bit reads to
	 * avoid two independent reads when on Opteron.
	 */
	if (xlator[reg] & _PORT_64BIT_FLAG)
		ret = read_7322_creg_port(ppd, creg);
	else
		ret = read_7322_creg32_port(ppd, creg);
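	/*
	 * While a snapshot/delta is in progress (ibdeltainprog), the
	 * expression "ret -= ret - snap" reports the value captured at
	 * link-down, hiding counts accrued during link maintenance; the
	 * accumulated deltas below then remove counts from earlier
	 * maintenance windows.
	 */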
	if (creg == crp_ibsymbolerr) {
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->ibsymsnap;
		ret -= ppd->cpspec->ibsymdelta;
	} else if (creg == crp_iblinkerrrecov) {
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->iblnkerrsnap;
		ret -= ppd->cpspec->iblnkerrdelta;
	} else if (creg == crp_errlink)
		ret -= ppd->cpspec->ibmalfdelta;
	else if (creg == crp_iblinkdown)
		ret += ppd->cpspec->iblnkdowndelta;
done:
	return ret;
}

/*
 * Device counter names (not port-specific), one line per stat,
 * single string.  Used by utilities like ipathstats to print the stats
 * in a way which works for different versions of drivers, without changing
 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
 * display by utility.
 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
 * The EgrOvfl list needs to be last so we truncate them at the configured
 * context count for the device.
 * cntr7322indices contains the corresponding register indices.
 */
static const char cntr7322names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n"
	"RxTIDInvalid\n"
	"RxTIDFloDrop\n" /* 7322 only */
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n"
	"Ctxt4EgrOvfl\n"
	"Ctxt5EgrOvfl\n"
	"Ctxt6EgrOvfl\n"
	"Ctxt7EgrOvfl\n"
	"Ctxt8EgrOvfl\n"
	"Ctxt9EgrOvfl\n"
	"Ctx10EgrOvfl\n"
	"Ctx11EgrOvfl\n"
	"Ctx12EgrOvfl\n"
	"Ctx13EgrOvfl\n"
	"Ctx14EgrOvfl\n"
	"Ctx15EgrOvfl\n"
	"Ctx16EgrOvfl\n"
	"Ctx17EgrOvfl\n"
	;

static const u32 cntr7322indices[] = {
	cr_lbint | _PORT_64BIT_FLAG,
	cr_lbstall | _PORT_64BIT_FLAG,
	cr_tidfull,
	cr_tidinvalid,
	cr_rxtidflowdrop,
	cr_base_egrovfl + 0,
	cr_base_egrovfl + 1,
	cr_base_egrovfl + 2,
	cr_base_egrovfl + 3,
	cr_base_egrovfl + 4,
	cr_base_egrovfl + 5,
	cr_base_egrovfl + 6,
	cr_base_egrovfl + 7,
	cr_base_egrovfl + 8,
	cr_base_egrovfl + 9,
	cr_base_egrovfl + 10,
	cr_base_egrovfl + 11,
	cr_base_egrovfl + 12,
	cr_base_egrovfl + 13,
	cr_base_egrovfl + 14,
	cr_base_egrovfl + 15,
	cr_base_egrovfl + 16,
	cr_base_egrovfl + 17,
};

/*
 * same as cntr7322names and cntr7322indices, but for port-specific counters.
 * portcntr7322indices is somewhat complicated by some registers needing
 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
 */
static const char portcntr7322names[] =
	"TxPkt\n"
	"TxFlowPkt\n"
	"TxWords\n"
	"RxPkt\n"
	"RxFlowPkt\n"
	"RxWords\n"
	"TxFlowStall\n"
	"TxDmaDesc\n"  /* 7220 and 7322-only */
	"E RxDlidFltr\n"  /* 7220 and 7322-only */
	"IBStatusChng\n"
	"IBLinkDown\n"
	"IBLnkRecov\n"
	"IBRxLinkErr\n"
	"IBSymbolErr\n"
	"RxLLIErr\n"
	"RxBadFormat\n"
	"RxBadLen\n"
	"RxBufOvrfl\n"
	"RxEBP\n"
	"RxFlowCtlErr\n"
	"RxICRCerr\n"
	"RxLPCRCerr\n"
	"RxVCRCerr\n"
	"RxInvalLen\n"
	"RxInvalPKey\n"
	"RxPktDropped\n"
	"TxBadLength\n"
	"TxDropped\n"
	"TxInvalLen\n"
	"TxUnderrun\n"
	"TxUnsupVL\n"
	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
	"RxVL15Drop\n"
	"RxVlErr\n"
	"XcessBufOvfl\n"
	"RxQPBadCtxt\n" /* 7322-only from here down */
	"TXBadHeader\n"
	;

static const u32 portcntr7322indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	crp_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	crp_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	crp_txsdmadesc | _PORT_64BIT_FLAG,
	crp_rxdlidfltr,
	crp_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	crp_rcvflowctrlviol,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	crp_txminmaxlenerr,
	crp_txdroppedpkt,
	crp_txlenerr,
	crp_txunderrun,
	crp_txunsupvl,
	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
	crp_rxqpinvalidctxt,
	crp_txhdrerr,
};

/* do all the setup to make the counter reads efficient later */
static void init_7322_cntrnames(struct qib_devdata *dd)
{
	int i, j = 0;
	char *s;
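
	/*
	 * Walk the newline-separated name list, counting entries; once the
	 * first per-context "EgrOvfl" name is seen, j tracks how many have
	 * been accepted, so the list is cut off after dd->cfgctxts of them
	 * (they are last in the list by construction).
	 */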
	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
	     i++) {
		/* we always have at least one counter before the egrovfl */
		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
			j = 1;
		s = strchr(s + 1, '\n');
		if (s && j)
			j++;
	}
	dd->cspec->ncntrs = i;
	if (!s)
		/* full list; size is without terminating null */
		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
	else
		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
		* sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	for (i = 0, s = (char *)portcntr7322names; s; i++)
		s = strchr(s + 1, '\n');
	dd->cspec->nportcntrs = i - 1;
	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
	for (i = 0; i < dd->num_pports; ++i) {
		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
			* sizeof(u64), GFP_KERNEL);
		if (!dd->pport[i].cpspec->portcntrs)
			qib_dev_err(dd,
				"Failed allocation for portcounters\n");
	}
}

static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
			      u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->cntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *) cntr7322names;
	} else {
		u64 *cntr = dd->cspec->cntrs;
		int i;

		ret = dd->cspec->ncntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->ncntrs; i++)
			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg(dd,
							 cntr7322indices[i] &
							 _PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32(dd,
							   cntr7322indices[i]);
	}
done:
	return ret;
}

static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
				  char **namep, u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->portcntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *)portcntr7322names;
	} else {
		struct qib_pportdata *ppd = &dd->pport[port];
		u64 *cntr = ppd->cpspec->portcntrs;
		int i;

		ret = dd->cspec->nportcntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->nportcntrs; i++) {
			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
				*cntr++ = qib_portcntr_7322(ppd,
					portcntr7322indices[i] &
					_PORT_CNTR_IDXMASK);
			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg_port(ppd,
					portcntr7322indices[i] &
					_PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32_port(ppd,
					portcntr7322indices[i]);
		}
	}
done:
	return ret;
}

/**
 * qib_get_7322_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * VESTIGIAL: IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have, yet, for 7322-based boards.
 *
 * called from add_timer
 */
static void qib_get_7322_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd;
	unsigned long flags;
	u64 traffic_wds;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/*
		 * If the port isn't enabled or isn't operational, or if
		 * diags is running (which can cause memory diags to fail),
		 * skip this port this time.
		 */
		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
		    || dd->diag_client)
			continue;

		/*
		 * Maintain an activity timer, based on traffic
		 * exceeding a threshold, so we need to check the word-counts
		 * even if they are 64-bit.
		 */
		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
		traffic_wds -= ppd->dd->traffic_wds;
		ppd->dd->traffic_wds += traffic_wds;
		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
						QIB_IB_QDR) &&
		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
				    QIBL_LINKACTIVE)) &&
		    ppd->cpspec->qdr_dfe_time &&
		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
			ppd->cpspec->qdr_dfe_on = 0;

			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_INIT_R1 :
					    QDR_STATIC_ADAPT_INIT);
			force_h1(ppd);
		}
	}
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}

/*
 * If we were using MSIx, try to fallback to INTx.
 */
static int qib_7322_intr_fallback(struct qib_devdata *dd)
{
	if (!dd->cspec->num_msix_entries)
		return 0; /* already using INTx */

	qib_devinfo(dd->pcidev,
		"MSIx interrupt not detected, trying INTx interrupts\n");
	qib_7322_nomsix(dd);
	qib_enable_intx(dd->pcidev);
	qib_setup_7322_interrupt(dd, 0);
	return 1;
}

/*
 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining.  To do this right, we reset IBC
 * as well, then return to previous state (which may be still in reset)
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
 * check all callers.
 */
static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
{
	u64 val;
	struct qib_devdata *dd = ppd->dd;
	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
		SYM_MASK(IBPCSConfig_0, xcv_treset) |
		SYM_MASK(IBPCSConfig_0, tx_rx_reset);

	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
	qib_write_kreg(dd, kr_hwerrmask,
		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
	qib_write_kreg_port(ppd, krp_ibcctrl_a,
			    ppd->cpspec->ibcctrl_a &
			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));

	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	qib_write_kreg(dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, statusValidNoEopClear));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
}

/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware.  It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum, control, len;
	struct qib_devdata *dd = ppd->dd;

	i = 0;
	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
  4725. control = qib_7322_setpbc_control(ppd, len, 0, 15);
  4726. pbc = ((u64) control << 32) | len;
  4727. while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
  4728. if (i++ > 15)
  4729. return;
  4730. udelay(2);
  4731. }
  4732. /* disable header check on this packet, since it can't be valid */
  4733. dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
  4734. writeq(pbc, piobuf);
  4735. qib_flush_wc();
  4736. qib_pio_copy(piobuf + 2, hdr, 7);
  4737. qib_pio_copy(piobuf + 9, data, dcnt);
  4738. if (dd->flags & QIB_USE_SPCL_TRIG) {
  4739. u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
  4740. qib_flush_wc();
  4741. __raw_writel(0xaebecede, piobuf + spcl_off);
  4742. }
  4743. qib_flush_wc();
  4744. qib_sendbuf_done(dd, pnum);
  4745. /* and re-enable hdr check */
  4746. dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
  4747. }
/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
		};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
		};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}
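/*
 * Pieced together from try_7322_autoneg() and autoneg_7322_work()
 * below, the overall heuristic handshake looks roughly like:
 *
 *	qib_autoneg_7322_send(ppd, 0);		"start" MAD, sent twice
 *	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 *	qib_7322_mini_pcs_reset(ppd);
 *	(link retrains; autoneg_7322_work() paces the waits)
 *	qib_autoneg_7322_send(ppd, 1);		"done" MAD, sent twice
 */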
/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change.  The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down)
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When the link has gone down and either autoneg is enabled, or autoneg
 * has failed and we give up until next time, we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
 */
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	u64 newctrlb;

	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK);
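	/*
	 * speed is a bitmask of QIB_IB_SDR/DDR/QDR, so speed & (speed - 1)
	 * is nonzero exactly when more than one speed bit is set; in that
	 * case advertise them all and let IBTA 1.2 negotiation choose.
	 */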
	if (speed & (speed - 1)) /* multiple speeds */
		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK;
	else
		newctrlb |= speed == QIB_IB_QDR ?
			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
			((speed == QIB_IB_DDR ?
			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));

	if (newctrlb == ppd->cpspec->ibcctrl_b)
		return;

	ppd->cpspec->ibcctrl_b = newctrlb;
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}
/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT
 */
static void try_7322_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	qib_autoneg_7322_send(ppd, 0);
	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
	qib_7322_mini_pcs_reset(ppd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}
/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = container_of(work, struct qib_chippport_specific,
			   autoneg_work.work)->ppd;
	dd = ppd->dd;

	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
		     == IB_7322_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to time out */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	/* we expect this to time out */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
		msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			ppd->cpspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}
/*
 * This routine is used to request that the IPG (inter-packet gap) be
 * set in the QLogic switch.
 * Only called if r1.
 */
static void try_7322_ipg(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	unsigned delay;
	int ret;

	agent = ibp->send_agent;
	if (!agent)
		goto retry;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		goto retry;

	if (!ibp->smi_ah) {
		struct ib_ah *ah;

		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
		if (IS_ERR(ah))
			ret = PTR_ERR(ah);
		else {
			send_buf->ah = ah;
			ibp->smi_ah = to_iah(ah);
			ret = 0;
		}
	} else {
		send_buf->ah = &ibp->smi_ah->ibah;
		ret = 0;
	}

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_SEND;
	smp->hop_cnt = 1;
	smp->attr_id = QIB_VENDOR_IPG;
	smp->attr_mod = 0;

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		ib_free_send_mad(send_buf);
retry:
	delay = 2 << ppd->cpspec->ipg_tries;
	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
			   msecs_to_jiffies(delay));
}
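/*
 * The retry delay above doubles with each attempt (2 << ipg_tries
 * msec); ipg_7322_work() below stops rescheduling after 10 tries.
 */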
/*
 * Timeout handler for setting IPG.
 * Only called if r1.
 */
static void ipg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;

	ppd = container_of(work, struct qib_chippport_specific,
			   ipg_work.work)->ppd;
	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
	    && ++ppd->cpspec->ipg_tries <= 10)
		try_7322_ipg(ppd);
}
static u32 qib_7322_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);

	switch (state) {
	case IB_7322_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7322_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7322_L_STATE_ACTIVE:
		/* fall through */
	case IB_7322_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7322_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}
/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7322_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
	return qib_7322_physportstate[state];
}
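/*
 * Handle a link up/down transition (ibup says which).  Returns 1 when
 * the DDR autoneg machinery consumed the event and no further generic
 * IB state-change processing should be done, 0 otherwise.
 */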
static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	unsigned long flags;
	int mult;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	/* Update our picture of width and speed from chip */
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
		ppd->link_speed_active = QIB_IB_QDR;
		mult = 4;
	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
		ppd->link_speed_active = QIB_IB_DDR;
		mult = 2;
	} else {
		ppd->link_speed_active = QIB_IB_SDR;
		mult = 1;
	}
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
		ppd->link_width_active = IB_WIDTH_4X;
		mult *= 4;
	} else
		ppd->link_width_active = IB_WIDTH_1X;
	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];

	if (!ibup) {
		u64 clr;

		/* Link went down. */
		/* do IPG MAD again after linkdown, even if last time failed */
		ppd->cpspec->ipg_tries = 0;
		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
		if (clr)
			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			struct qib_qsfp_data *qd =
				&ppd->cpspec->qsfp_data;

			/* unlock the Tx settings, speed may change */
			qib_write_kreg_port(ppd, krp_tx_deemph_override,
				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				reset_tx_deemphasis_override));
			qib_cancel_sends(ppd);
			/* on link down, ensure sane pcs state */
			qib_7322_mini_pcs_reset(ppd);
			/* schedule the qsfp refresh which should turn the link
			   off */
			if (ppd->dd->flags & QIB_HAS_QSFP) {
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
		if (clr == ppd->cpspec->iblnkdownsnap)
			ppd->cpspec->iblnkdowndelta++;
	} else {
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & QIB_IB_DDR)
		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and auto-negotiation enabled */
			++ppd->cpspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymdelta +=
					read_7322_creg32_port(ppd,
						crp_ibsymbolerr) -
						ppd->cpspec->ibsymsnap;
				ppd->cpspec->iblnkerrdelta +=
					read_7322_creg32_port(ppd,
						crp_iblinkerrrecov) -
						ppd->cpspec->iblnkerrsnap;
			}
			try_7322_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			qib_autoneg_7322_send(ppd, 1);
			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
			qib_7322_mini_pcs_reset(ppd);
			udelay(2);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   (ppd->link_speed_active & QIB_IB_DDR)) {
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
					 QIBL_IB_AUTONEG_FAILED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->autoneg_tries = 0;
			/* re-enable SDR, for next link down */
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
			wake_up(&ppd->cpspec->autoneg_wait);
			symadj = 1;
		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
			/*
			 * Clear autoneg failure flag, and do setup
			 * so we'll try next time link goes down and
			 * back to INIT (possibly connected to a
			 * different device).
			 */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
			symadj = 1;
		}
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			symadj = 1;
			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
				try_7322_ipg(ppd);
			if (!ppd->cpspec->recovery_init)
				setup_7322_link_recovery(ppd, 0);
			ppd->cpspec->qdr_dfe_time = jiffies +
				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
		}
		ppd->cpspec->ibmalfusesnap = 0;
		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
			crp_errlink);
	}
	if (symadj) {
		ppd->cpspec->iblnkdownsnap =
			read_7322_creg32_port(ppd, crp_iblinkdown);
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
			crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
			crp_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7322_setextled(ppd, ibup);
	return ret;
}
/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * these are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function.  We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}
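/*
 * With mask == 0 the call above degenerates to a pure read of the GP
 * inputs, e.g. "gpio_7322_mod(dd, 0, 0, 0)", which is exactly how
 * qib_7322_eeprom_wen() below samples the current write-enable pin.
 */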
/* Enable writes to config EEPROM, if possible. Returns previous state */
static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
{
	int prev_wen;
	u32 mask;

	mask = 1 << QIB_EEPROM_WEN_NUM;
	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);

	return prev_wen & 1;
}
/*
 * Read fundamental info we need to use the chip.  These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7322_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->palign = qib_read_kreg32(dd, kr_pagealign);

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport[0].ibmtu = (u32)mtu;
	dd->pport[1].ibmtu = (u32)mtu;

	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	dd->pio4kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase +
		 (dd->piobufbase >> 32));
	/*
	 * 4K buffers take 2 pages; we use roundup just to be
	 * paranoid; we calculate it once here, rather than on
	 * every buf allocate
	 */
	dd->align4k = ALIGN(dd->piosize4k, dd->palign);

	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
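	/*
	 * Each 64-bit pioavail register carries two status bits for each
	 * of 32 send buffers (sizeof(u64) * BITS_PER_BYTE / 2 == 32), so
	 * round the buffer count up to a whole number of registers.
	 */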
	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as separate function
 */
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	cregbase = qib_read_kreg32(dd, kr_counterregbase);

	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
		(char __iomem *)dd->kregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);

	/* port registers are defined as relative to base of chip */
	dd->pport[0].cpspec->kpregbase =
		(u64 __iomem *)((char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->kpregbase =
		(u64 __iomem *)(dd->palign +
		(char __iomem *)dd->kregbase);
	dd->pport[0].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
}
/*
 * This is a fairly special-purpose observer, so we only support
 * the port-specific parts of SendCtrl
 */

#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))

static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op, u32 offs,
			 u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx;
	unsigned pidx;
	struct qib_pportdata *ppd = NULL;
	u64 local_data, all_bits;

	/*
	 * The fixed correspondence between Physical ports and pports is
	 * severed.  We need to hunt for the ppd that corresponds
	 * to the offset we got.  And we have to do that without admitting
	 * we know the stride, apparently.
	 */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		u64 __iomem *psptr;
		u32 psoffs;

		ppd = dd->pport + pidx;
		if (!ppd->cpspec->kpregbase)
			continue;

		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
		if (psoffs == offs)
			break;
	}

	/* If pport is not being managed by driver, just avoid shadows. */
	if (pidx >= dd->num_pports)
		ppd = NULL;

	/* In any case, "idx" is flat index in kreg space */
	idx = offs / sizeof(u64);

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (!ppd || (mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read.  The judgement call is whether from
		 * reg or shadow.  First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		if (ppd) {
			sval = ppd->p_sendctrl & ~mask;
			sval |= *data & SENDCTRL_SHADOWED & mask;
			ppd->p_sendctrl = sval;
		} else
			sval = *data & SENDCTRL_SHADOWED & mask;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_write_kreg(dd, idx, tval);
		qib_write_kreg(dd, kr_scratch, 0ULL);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	return only_32 ? 4 : 8;
}
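/*
 * The 4-or-8 return value above is the access size actually handled,
 * presumably so the diag read/write path knows how many bytes of the
 * request this observer consumed.
 */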
static const struct diag_observer sendctrl_0_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
	KREG_IDX(SendCtrl_0) * sizeof(u64)
};

static const struct diag_observer sendctrl_1_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
	KREG_IDX(SendCtrl_1) * sizeof(u64)
};

static ushort sdma_fetch_prio = 8;
module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
/* Besides logging QSFP events, we set appropriate TxDDS values */
static void init_txdds_table(struct qib_pportdata *ppd, int override);

static void qsfp_7322_event(struct work_struct *work)
{
	struct qib_qsfp_data *qd;
	struct qib_pportdata *ppd;
	unsigned long pwrup;
	unsigned long flags;
	int ret;
	u32 le2;

	qd = container_of(work, struct qib_qsfp_data, work);
	ppd = qd->ppd;
	pwrup = qd->t_insert +
		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);

	/* Delay for 20 msecs to allow ModPrs resistor to setup */
	mdelay(QSFP_MODPRS_LAG_MSEC);

	if (!qib_qsfp_mod_present(ppd)) {
		ppd->cpspec->qsfp_data.modpresent = 0;
		/* Set the physical link to disabled */
		qib_set_ib_7322_lstate(ppd, 0,
				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else {
		/*
		 * Some QSFPs not only do not respond until the full power-up
		 * time, but may behave badly if we try.  So hold off
		 * responding to insertion.
		 */
		while (1) {
			if (time_is_before_jiffies(pwrup))
				break;
			msleep(20);
		}

		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);

		/*
		 * Need to change LE2 back to defaults if we couldn't
		 * read the cable type (to handle cable swaps), so do this
		 * even on failure to read cable information.  We don't
		 * get here for QME, so IS_QME check not needed here.
		 */
		if (!ret && !ppd->dd->cspec->r1) {
			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
				le2 = LE2_QME;
			else if (qd->cache.atten[1] >= qib_long_atten &&
				 QSFP_IS_CU(qd->cache.tech))
				le2 = LE2_5m;
			else
				le2 = LE2_DEFAULT;
		} else
			le2 = LE2_DEFAULT;
		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
		/*
		 * We always change parameters, since we can choose
		 * values for cables without eeproms, and the cable may have
		 * changed from a cable with full or partial eeprom content
		 * to one with partial or no content.
		 */
		init_txdds_table(ppd, 0);
		/* The physical link is being re-enabled only when the
		 * previous state was DISABLED and the VALID bit is not
		 * set.  This should only happen when the cable has been
		 * physically pulled. */
		if (!ppd->cpspec->qsfp_data.modpresent &&
		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
			ppd->cpspec->qsfp_data.modpresent = 1;
			qib_set_ib_7322_lstate(ppd, 0,
					       QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_LINKV;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		}
	}
}
/*
 * There is little we can do but complain to the user if QSFP
 * initialization fails.
 */
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
{
	unsigned long flags;
	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
	struct qib_devdata *dd = ppd->dd;
	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;

	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
	qd->ppd = ppd;
	qib_qsfp_init(qd, qsfp_7322_event);
	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
	dd->cspec->gpio_mask |= mod_prs_bit;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}
/*
 * called at device initialization time, and also if the txselect
 * module parameter is changed.  This is used for cables that don't
 * have valid QSFP EEPROMs (not present, or attenuation is zero).
 * We initialize to the default, then if there is a specific
 * unit,port match, we use that (and set it immediately, for the
 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#", separators must
 * be a SPACE character.  A newline terminates.  The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value.
 * The last specific match is used (actually, all are used, but last
 * one is the one that winds up set); if none at all, fall back on default.
 */
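/*
 * For example (values hypothetical): "2 0,1=5 0,2=7,10" selects txdds
 * entry 2 by default, entry 5 for unit 0 port 1, and entry 7 with an
 * H1 value of 10 for unit 0 port 2.
 */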
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
{
	char *nxt, *str;
	u32 pidx, unit, port, deflt, h1;
	unsigned long val;
	int any = 0, seth1;
	int txdds_size;

	str = txselect_list;

	/* default number is validated in setup_txselect() */
	deflt = simple_strtoul(str, &nxt, 0);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->pport[pidx].cpspec->no_eep = deflt;

	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
	if (IS_QME(dd) || IS_QMH(dd))
		txdds_size += TXDDS_MFG_SZ;

	while (*nxt && nxt[1]) {
		str = ++nxt;
		unit = simple_strtoul(str, &nxt, 0);
		if (nxt == str || !*nxt || *nxt != ',') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		port = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != '=') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		val = simple_strtoul(str, &nxt, 0);
		if (nxt == str) {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		if (val >= txdds_size)
			continue;
		seth1 = 0;
		h1 = 0; /* gcc thinks it might be used uninitialized */
		if (*nxt == ',' && nxt[1]) {
			str = ++nxt;
			h1 = (u32)simple_strtoul(str, &nxt, 0);
			if (nxt == str)
				while (*nxt && *nxt++ != ' ') /* skip */
					;
			else
				seth1 = 1;
		}
		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
		     ++pidx) {
			struct qib_pportdata *ppd = &dd->pport[pidx];

			if (ppd->port != port || !ppd->link_speed_supported)
				continue;
			ppd->cpspec->no_eep = val;
			if (seth1)
				ppd->cpspec->h1_val = h1;
			/* now change the IBC and serdes, overriding generic */
			init_txdds_table(ppd, 1);
			/* Re-enable the physical state machine on mezz boards
			 * now that the correct settings have been set.
			 * QSFP boards are handled by the QSFP event handler */
			if (IS_QMH(dd) || IS_QME(dd))
				qib_set_ib_7322_lstate(ppd, 0,
					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			any++;
		}
		if (*nxt == '\n')
			break; /* done */
	}
	if (change && !any) {
		/* no specific setting, use the default.
		 * Change the IBC and serdes, but since it's
		 * general, don't override specific settings.
		 */
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				init_txdds_table(&dd->pport[pidx], 0);
	}
}
/* handle the txselect parameter changing */
static int setup_txselect(const char *str, struct kernel_param *kp)
{
	struct qib_devdata *dd;
	unsigned long val;
	int ret;

	if (strlen(str) >= MAX_ATTEN_LEN) {
		pr_info("txselect_values string too long\n");
		return -ENOSPC;
	}
	ret = kstrtoul(str, 0, &val);
	if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
				TXDDS_MFG_SZ)) {
		pr_info("txselect_values must start with a number < %d\n",
			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
		return ret ? ret : -EINVAL;
	}

	strcpy(txselect_list, str);
	list_for_each_entry(dd, &qib_dev_list, list)
		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
			set_no_qsfp_atten(dd, 1);
	return 0;
}
/*
 * Write the final few registers that depend on some of the
 * init setup.  Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7322_initreg(struct qib_devdata *dd)
{
	int ret = 0, n;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd,
			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
			(unsigned long) dd->pioavailregs_phys,
			(unsigned long long) val);
		ret = -EINVAL;
	}

	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);

	qib_register_observer(dd, &sendctrl_0_observer);
	qib_register_observer(dd, &sendctrl_1_observer);

	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);
	/*
	 * Set SendDmaFetchPriority and init Tx params, including
	 * QSFP handler on boards that have QSFP.
	 * First set our default attenuation entry for cables that
	 * don't have valid attenuation.
	 */
	set_no_qsfp_atten(dd, 0);
	for (n = 0; n < dd->num_pports; ++n) {
		struct qib_pportdata *ppd = dd->pport + n;

		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
				    sdma_fetch_prio & 0xf);
		/* Initialize qsfp if present on board. */
		if (dd->flags & QIB_HAS_QSFP)
			qib_init_7322_qsfp(ppd);
	}
	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);

	return ret;
}
/* per IB port errors.  */
#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
	MASK_ACROSS(8, 15))
#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
	MASK_ACROSS(0, 11))
/*
 * Write the initialization per-port registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_init_portregs(struct qib_pportdata *ppd)
{
	u64 val;
	int i;

	if (!ppd->link_speed_supported) {
		/* no buffer credits for this port */
		for (i = 1; i < 8; i++)
			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
		return;
	}

	/*
	 * Set the number of supported virtual lanes in IBC,
	 * for flow control packet handling on unsupported VLs
	 */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
	val |= (u64)(ppd->vls_supported - 1) <<
		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);

	/* enable tx header checking */
	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);

	qib_write_kreg_port(ppd, krp_ncmodectrl,
		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));

	/*
	 * Unconditionally clear the bufmask bits.  If SDMA is
	 * enabled, we'll set them appropriately later.
	 */
	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
	if (ppd->dd->cspec->r1)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
}
/*
 * Write the initialization per-device registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).  Also write per-port
 * registers that are affected by overall device config, such as QP mapping
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_initregs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i, pidx;
	u64 val;

	/* Set Multicast QPs received by port 2 to map to context one. */
	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		unsigned n, regno;
		unsigned long flags;

		if (dd->n_krcv_queues < 2 ||
			!dd->pport[pidx].link_speed_supported)
			continue;

		ppd = &dd->pport[pidx];

		/* be paranoid against later code motion, etc. */
		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

		/* Initialize QP to context mapping */
		regno = krp_rcvqpmaptable;
		val = 0;
		if (dd->num_pports > 1)
			n = dd->first_user_ctxt / dd->num_pports;
		else
			n = dd->first_user_ctxt - 1;
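		/*
		 * Each of the 32 QP-map entries is a 5-bit context number;
		 * six of them pack into one 64-bit register, so flush val
		 * to the chip and bump regno after every sixth entry.
		 */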
		for (i = 0; i < 32; ) {
			unsigned ctxt;

			if (dd->num_pports > 1)
				ctxt = (i % n) * dd->num_pports + pidx;
			else if (i % n)
				ctxt = (i % n) + 1;
			else
				ctxt = ppd->hw_pidx;
			val |= ctxt << (5 * (i % 6));
			i++;
			if (i % 6 == 0) {
				qib_write_kreg_port(ppd, regno, val);
				val = 0;
				regno++;
			}
		}
		qib_write_kreg_port(ppd, regno, val);
	}

	/*
	 * Set up interrupt mitigation for kernel contexts, but
	 * not user contexts (user contexts use interrupts when
	 * stalled waiting for any packet, so want those interrupts
	 * right away).
	 */
	for (i = 0; i < dd->first_user_ctxt; i++) {
		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
	}

	/*
	 * Initialize as (disabled) rcvflow tables.  Application code
	 * will set up each flow as it uses the flow.
	 * Doesn't clear any of the error bits that might be set.
	 */
	val = TIDFLOW_ERRBITS; /* these are W1C */
	for (i = 0; i < dd->cfgctxts; i++) {
		int flow;

		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
	}

	/*
	 * Dual cards init to dual port recovery, single port cards to
	 * the one port.  Dual port cards may later adjust to 1 port,
	 * and then back to dual port if both ports are connected.
	 */
	if (dd->num_pports)
		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
}
static int qib_init_7322_variables(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned features, pidx, sbufcnt;
	int ret, mtu;
	u32 sbufs, updthresh;

	/* pport structs are contiguous, allocated after devdata */
	ppd = (struct qib_pportdata *)(dd + 1);
	dd->pport = ppd;
	ppd[0].dd = dd;
	ppd[1].dd = dd;

	dd->cspec = (struct qib_chip_specific *)(ppd + 2);

	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
	ppd[1].cpspec = &ppd[0].cpspec[1];
	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */

	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd,
			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;  /* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
	dd->cspec->r1 = dd->minrev == 1;

	get_7322_chip_params(dd);
	features = qib_7322_boardname(dd);

	/* now that piobcnt2k and 4k set, we can allocate these */
	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
		NUM_VL15_BUFS + BITS_PER_LONG - 1;
	sbufcnt /= BITS_PER_LONG;
	dd->cspec->sendchkenable = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
	dd->cspec->sendibchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
		!dd->cspec->sendibchk) {
		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
		ret = -ENOMEM;
		goto bail;
	}

	ppd = dd->pport;

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
		QIB_HAS_THRESH_UPDATE |
		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * Setup initial values.  These may change when PAT is enabled, but
	 * we need these to do initial chip register accesses.
	 */
	qib_7322_set_baseaddrs(dd);

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;

	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
	/* all hwerrors become interrupts, unless special purposed */
	dd->cspec->hwerrmask = ~0ULL;
	/* link_recovery setup causes these errors, so ignore them,
	 * other than clearing them when they occur */
	dd->cspec->hwerrmask &=
		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
		  HWE_MASK(LATriggered));

	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
		struct qib_chippport_specific *cp = ppd->cpspec;

		ppd->link_speed_supported = features & PORT_SPD_CAP;
		features >>= PORT_SPD_CAP_SHIFT;
		if (!ppd->link_speed_supported) {
			/* single port mode (7340, or configured) */
			dd->skip_kctxt_mask |= 1 << pidx;
			if (pidx == 0) {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				ppd[0] = ppd[1];
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_0)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_0));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
				     SYM_MASK(IntMask, SDmaIntMask_0) |
				     SYM_MASK(IntMask, ErrIntMask_0) |
				     SYM_MASK(IntMask, SendDoneIntMask_0));
			} else {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_1)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_1));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
				     SYM_MASK(IntMask, SDmaIntMask_1) |
				     SYM_MASK(IntMask, ErrIntMask_1) |
				     SYM_MASK(IntMask, SendDoneIntMask_1));
			}
			continue;
		}

		dd->num_pports++;
		qib_init_pportdata(ppd, dd, pidx, dd->num_pports);

		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
		ppd->link_width_enabled = IB_WIDTH_4X;
		ppd->link_speed_enabled = ppd->link_speed_supported;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->link_width_active = IB_WIDTH_4X;
		ppd->link_speed_active = QIB_IB_SDR;
		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
		switch (qib_num_cfg_vls) {
		case 1:
			ppd->vls_supported = IB_VL_VL0;
			break;
		case 2:
			ppd->vls_supported = IB_VL_VL0_1;
			break;
		default:
			qib_devinfo(dd->pcidev,
				    "Invalid num_vls %u, using 4 VLs\n",
				    qib_num_cfg_vls);
			qib_num_cfg_vls = 4;
			/* fall through */
		case 4:
			ppd->vls_supported = IB_VL_VL0_3;
			break;
		case 8:
			if (mtu <= 2048)
				ppd->vls_supported = IB_VL_VL0_7;
			else {
				qib_devinfo(dd->pcidev,
					    "Invalid num_vls %u for MTU %d, using 4 VLs\n",
					    qib_num_cfg_vls, mtu);
				ppd->vls_supported = IB_VL_VL0_3;
				qib_num_cfg_vls = 4;
			}
			break;
		}
		ppd->vls_operational = ppd->vls_supported;

		init_waitqueue_head(&cp->autoneg_wait);
		INIT_DELAYED_WORK(&cp->autoneg_work,
				  autoneg_7322_work);
		if (ppd->dd->cspec->r1)
			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);

		/*
		 * For Mez and similar cards, no qsfp info, so do
		 * the "cable info" setup here.  Can be overridden
		 * in adapter-specific routines.
		 */
		if (!(dd->flags & QIB_HAS_QSFP)) {
			if (!IS_QMH(dd) && !IS_QME(dd))
				qib_devinfo(dd->pcidev,
					"IB%u:%u: Unknown mezzanine card type\n",
					dd->unit, ppd->port);
			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
			/*
			 * Choose center value as default tx serdes setting
			 * until changed through module parameter.
			 */
			ppd->cpspec->no_eep = IS_QMH(dd) ?
				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
		} else
			cp->h1_val = H1_FORCE_VAL;

		/* Avoid writes to chip for mini_init */
		if (!qib_mini_init)
			write_7322_init_portregs(ppd);

		init_timer(&cp->chase_timer);
		cp->chase_timer.function = reenable_chase;
		cp->chase_timer.data = (unsigned long)ppd;

		ppd++;
	}

	dd->rcvhdrentsize = qib_rcvhdrentsize ?
		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = qib_rcvhdrsize ?
		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	dd->rcvegrbufsize = max(mtu, 2048);
	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);

	qib_7322_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.
	 */
	dd->rhdrhead_intr_off =
		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;

	/* setup the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7322_faststats;
	dd->stats_timer.data = (unsigned long) dd;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = dd->piosize2k >> 2;

	qib_7322_config_ctxts(dd);
	qib_set_ctxtcnt(dd);

	if (qib_wc_pat) {
		resource_size_t vl15off;
		/*
		 * We do not set WC on the VL15 buffers to avoid
		 * a rare problem with unaligned writes from
		 * interrupt-flushed store buffers, so we need
		 * to map those separately here.  We can't solve
		 * this for the rarely used mtrr case.
		 */
		ret = init_chip_wc_pat(dd, 0);
		if (ret)
			goto bail;

		/* vl15 buffers start just after the 4k buffers */
		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
			dd->piobcnt4k * dd->align4k;
		dd->piovl15base = ioremap_nocache(vl15off,
						  NUM_VL15_BUFS * dd->align4k);
		if (!dd->piovl15base) {
			ret = -ENOMEM;
			goto bail;
		}
	}
	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;
	if (!dd->num_pports) {
		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
		goto bail; /* no error, so can still figure out why err */
	}

	write_7322_initregs(dd);
	ret = qib_create_ctxts(dd);
	init_7322_cntrnames(dd);

	updthresh = 8U; /* update threshold */

	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * if this was less than the update threshold, we could wait
	 * a long time for an update.  Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}
	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->last_pio = dd->cspec->lastbuf_for_pio;
	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
		dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt) : 0;

	/*
	 * If we have 16 user contexts, we will have 7 sbufs
	 * per context, so reduce the update threshold to match.  We
	 * want to update before we actually run out, at low pbufs/ctxt
	 * so give ourselves some margin.
	 */
	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
		updthresh = dd->pbufsctxt - 2;
	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;

bail:
	if (!dd->ctxtcnt)
		dd->ctxtcnt = 1; /* for other initialization code */

	return ret;
}
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;

	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
	if (pbc & PBC_7322_VL15_SEND) {
		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
		last = first;
	} else {
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		last = dd->cspec->lastbuf_for_pio;
	}
	return qib_getsendbuf_range(dd, pbufnum, first, last);
}
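/*
 * Note: each port gets one dedicated VL15 buffer just past the 2K and
 * 4K arrays (hence "piobcnt2k + piobcnt4k + hw_pidx" above), presumably
 * so subnet management packets can't be starved by normal traffic;
 * everything else searches the shared 2K/4K ranges.
 */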
static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	qib_write_kreg_port(ppd, krp_psinterval, intv);
	qib_write_kreg_port(ppd, krp_psstart, start);
}

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
}

/*
 * sdma_lock should be acquired before calling this routine
 */
static void dump_sdma_7322_state(struct qib_pportdata *ppd)
{
	u64 reg, reg1, reg2;

	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmastatus: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_sendctrl);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA sendctrl: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmabase);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmabase: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);

	/*
	 * get bufuse bits, clear them, and print them again if non-zero.
	 * Write each value back to its own register; the original code
	 * wrote all three to bufuse0, which cleared the wrong bits.
	 */
	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
	/* 0 and 1 should always be zero, so print as short form */
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);
	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	/* 0 and 1 should always be zero, so print as short form */
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);

	reg = qib_read_kreg_port(ppd, krp_senddmatail);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmatail: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmahead);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmahead: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmaheadaddr: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmalengen: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmadesccnt: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmaidlecnt: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmaprioritythld: 0x%016llx\n", reg);

	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmareloadcnt: 0x%016llx\n", reg);

	dump_sdma_state(ppd);
}
static struct sdma_set_state_action sdma_7322_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_drain = 0,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_drain = 0,
		.go_s99_running_totrue = 1,
	},
};
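
/*
 * Each entry above supplies the enable/interrupt/halt/drain bits that the
 * generic SDMA state machine applies (through the f_sdma_sendctrl hook)
 * when a port's engine enters that state; e.g. s99_running is the only
 * state with halt deasserted, and only s50_hw_halt_wait requests a drain.
 */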
static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7322_action_table;
}

static int init_sdma_7322_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned lastbuf, erstbuf;
	u64 senddmabufmask[3] = { 0 };
	int n, ret = 0;

	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);

	if (dd->num_pports)
		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
	else
		n = dd->cspec->sdmabufcnt; /* failsafe for init */
	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
		((dd->num_pports == 1 || ppd->port == 2) ? n :
		dd->cspec->sdmabufcnt);
	lastbuf = erstbuf + n;

	ppd->sdma_state.first_sendbuf = erstbuf;
	ppd->sdma_state.last_sendbuf = lastbuf;
	for (; erstbuf < lastbuf; ++erstbuf) {
		unsigned word = erstbuf / BITS_PER_LONG;
		unsigned bit = erstbuf & (BITS_PER_LONG - 1);

		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
	return ret;
}
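
/*
 * SDMA buffer carve-out above, by example: on a two-port card with
 * sdmabufcnt buffers reserved in total, each port gets n = sdmabufcnt / 2.
 * Port 1 starts at (piobcnt2k + piobcnt4k) - sdmabufcnt and port 2 at
 * (piobcnt2k + piobcnt4k) - n, so the two ranges tile the top of the PIO
 * buffer space back to back; the three 64-bit mask registers then mark
 * exactly those buffers as SDMA-owned.
 */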
/* sdma_lock must be held */
static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
		(u16) qib_read_kreg_port(ppd, krp_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	if (swhead < swtail)
		/* not wrapped */
		sane = (hwhead >= swhead) && (hwhead <= swtail);
	else if (swhead > swtail)
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	else
		/* empty */
		sane = (hwhead == swhead);

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* proceed as if no progress */
		hwhead = swhead;
	}

	return hwhead;
}
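
/*
 * Sanity check above, by example: in a ring of cnt descriptors with
 * swhead = 10 and swtail = 50, any hardware head outside [10, 50] means
 * the DMA'ed head copy (or the register) is stale or corrupt; in the
 * wrapped case (swhead > swtail) the valid region is [swhead, cnt) plus
 * [0, swtail]. An insane value falls back from the in-memory head copy
 * to the register, and finally to "no progress".
 */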
static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
{
	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);

	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
}
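
/*
 * That is, the engine only counts as idle once it is internally halted
 * with an empty scoreboard and neither a drain nor a halt still in
 * progress; any other combination reports busy.
 */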
/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * The delay affects the next packet and the amount of the delay is
 * based on the length of this packet.
 */
static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	u8 snd_mult = ppd->delay_mult;
	u8 rcv_mult = ib_rate_to_delay[srate];
	u32 ret;

	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;

	/* Indicate VL15, else set the VL in the control word */
	if (vl == 15)
		ret |= PBC_7322_VL15_SEND_CTRL;
	else
		ret |= vl << PBC_VL_NUM_LSB;
	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
	return ret;
}
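
/*
 * Delay computation above: when the receiver's delay multiplier exceeds
 * the sender's (i.e. the QP's static rate is slower than the local
 * link), the inter-packet delay is roughly half this packet's PBC length
 * times the sender's multiplier; at matched or faster static rates no
 * delay is inserted.
 */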
/*
 * Enable the per-port VL15 send buffers for use.
 * They follow the rest of the buffers, without a config parameter.
 * This used to be done in initregs, but that runs before the
 * PIO-availability shadow is set up, and this has to be done after
 * the shadow is set up.
 */
static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
{
	unsigned vl15bufs;

	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
			       TXCHK_CHG_TYPE_KERN, NULL);
}

static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
{
	if (rcd->ctxt < NUM_IB_PORTS) {
		if (rcd->dd->num_pports > 1) {
			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
		} else {
			rcd->rcvegrcnt = KCTXT0_EGRCNT;
			rcd->rcvegr_tid_base = 0;
		}
	} else {
		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
	}
}
#define QTXSLEEPS 5000
static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
	int i;
	const int last = start + len - 1;
	const int lastr = last / BITS_PER_LONG;
	u32 sleeps = 0;
	int wait = rcd != NULL;
	unsigned long flags;

	while (wait) {
		unsigned long shadow;
		int cstart, previ = -1;

		/*
		 * When flipping from kernel to user, we can't change
		 * the checking type if the buffer is allocated to the
		 * driver. The other direction is OK, because it happens
		 * at close, and we have just disarmed all the buffers.
		 * All the kernel-to-kernel changes are also OK.
		 */
		for (cstart = start; cstart <= last; cstart++) {
			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
				/ BITS_PER_LONG;
			if (i != previ) {
				shadow = (unsigned long)
					le64_to_cpu(dd->pioavailregs_dma[i]);
				previ = i;
			}
			if (test_bit(((2 * cstart) +
				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
				     % BITS_PER_LONG, &shadow))
				break;
		}

		if (cstart > last)
			break;

		if (sleeps == QTXSLEEPS)
			break;
		/* make sure we see an updated copy next time around */
		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		sleeps++;
		msleep(20);
	}
	switch (which) {
	case TXCHK_CHG_TYPE_DIS1:
		/*
		 * disable checking on a range; used by diags; just
		 * one buffer, but still written generically
		 */
		for (i = start; i <= last; i++)
			clear_bit(i, dd->cspec->sendchkenable);
		break;

	case TXCHK_CHG_TYPE_ENAB1:
		/*
		 * (re)enable checking on a range; used by diags; just
		 * one buffer, but still written generically; read
		 * scratch to be sure buffer actually triggered, not
		 * just flushed from processor.
		 */
		qib_read_kreg32(dd, kr_scratch);
		for (i = start; i <= last; i++)
			set_bit(i, dd->cspec->sendchkenable);
		break;

	case TXCHK_CHG_TYPE_KERN:
		/* usable by kernel */
		for (i = start; i <= last; i++) {
			set_bit(i, dd->cspec->sendibchk);
			clear_bit(i, dd->cspec->sendgrhchk);
		}
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		/* see if we need to raise avail update threshold */
		for (i = dd->first_user_ctxt;
		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
		     && i < dd->cfgctxts; i++)
			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
			   < dd->cspec->updthresh_dflt)
				break;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		if (i == dd->cfgctxts) {
			spin_lock_irqsave(&dd->sendctrl_lock, flags);
			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
					SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		}
		break;

	case TXCHK_CHG_TYPE_USER:
		/* for user process */
		for (i = start; i <= last; i++) {
			clear_bit(i, dd->cspec->sendibchk);
			set_bit(i, dd->cspec->sendgrhchk);
		}
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
			dd->cspec->updthresh = (rcd->piocnt /
						rcd->subctxt_cnt) - 1;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		} else
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		break;

	default:
		break;
	}

	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
		qib_write_kreg(dd, kr_sendcheckmask + i,
			       dd->cspec->sendchkenable[i]);

	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
			       dd->cspec->sendgrhchk[i]);
		qib_write_kreg(dd, kr_sendibpktmask + i,
			       dd->cspec->sendibchk[i]);
	}

	/*
	 * Be sure whatever we did was seen by the chip and acted upon,
	 * before we return. Mostly important for which >= 2.
	 */
	qib_read_kreg32(dd, kr_scratch);
}
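
/*
 * As the two trailing loops above imply, the TXCHK_CHG_TYPE encoding has
 * the diag types (ENAB1/DIS1) as values >= 2 and the KERN/USER types as
 * values < 2: the former flush sendchkenable to the sendcheckmask
 * registers, while the latter flush the GRH-check and IB-packet masks
 * instead, so only the registers a given change can affect get rewritten.
 */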
/* useful for trigger analyzers, etc. */
static void writescratch(struct qib_devdata *dd, u32 val)
{
	qib_write_kreg(dd, kr_scratch, val);
}

/* Dummy for now, use chip regs soon */
static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	return -ENXIO;
}
/**
 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, inits, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct qib_devdata *dd;
	int ret, i;
	u32 tabsize, actual_cnt = 0;

	dd = qib_alloc_devdata(pdev,
		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
		sizeof(struct qib_chip_specific) +
		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
	if (IS_ERR(dd))
		goto bail;

	dd->f_bringup_serdes = qib_7322_bringup_serdes;
	dd->f_cleanup = qib_setup_7322_cleanup;
	dd->f_clear_tids = qib_7322_clear_tids;
	dd->f_free_irq = qib_7322_free_irq;
	dd->f_get_base_info = qib_7322_get_base_info;
	dd->f_get_msgheader = qib_7322_get_msgheader;
	dd->f_getsendbuf = qib_7322_getsendbuf;
	dd->f_gpio_mod = gpio_7322_mod;
	dd->f_eeprom_wen = qib_7322_eeprom_wen;
	dd->f_hdrqempty = qib_7322_hdrqempty;
	dd->f_ib_updown = qib_7322_ib_updown;
	dd->f_init_ctxt = qib_7322_init_ctxt;
	dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
	dd->f_intr_fallback = qib_7322_intr_fallback;
	dd->f_late_initreg = qib_late_7322_initreg;
	dd->f_setpbc_control = qib_7322_setpbc_control;
	dd->f_portcntr = qib_portcntr_7322;
	dd->f_put_tid = qib_7322_put_tid;
	dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
	dd->f_rcvctrl = rcvctrl_7322_mod;
	dd->f_read_cntrs = qib_read_7322cntrs;
	dd->f_read_portcntrs = qib_read_7322portcntrs;
	dd->f_reset = qib_do_7322_reset;
	dd->f_init_sdma_regs = init_sdma_7322_regs;
	dd->f_sdma_busy = qib_sdma_7322_busy;
	dd->f_sdma_gethead = qib_sdma_7322_gethead;
	dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
	dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
	dd->f_sendctrl = sendctrl_7322_mod;
	dd->f_set_armlaunch = qib_set_7322_armlaunch;
	dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
	dd->f_iblink_state = qib_7322_iblink_state;
	dd->f_ibphys_portstate = qib_7322_phys_portstate;
	dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
	dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
	dd->f_set_ib_loopback = qib_7322_set_loopback;
	dd->f_get_ib_table = qib_7322_get_ib_table;
	dd->f_set_ib_table = qib_7322_set_ib_table;
	dd->f_set_intr_state = qib_7322_set_intr_state;
	dd->f_setextled = qib_setup_7322_setextled;
	dd->f_txchk_change = qib_7322_txchk_change;
	dd->f_update_usrhead = qib_update_7322_usrhead;
	dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
	dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
	dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
	dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
	dd->f_sdma_init_early = qib_7322_sdma_init_early;
	dd->f_writescratch = writescratch;
	dd->f_tempsense_rd = qib_7322_tempsense_rd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dd->f_notify_dca = qib_7322_notify_dca;
#endif
	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped, but chip registers
	 * are not set up until start of qib_init_7322_variables.
	 */
	ret = qib_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* initialize chip-specific variables */
	ret = qib_init_7322_variables(dd);
	if (ret)
		goto bail_cleanup;

	if (qib_mini_init || !dd->num_pports)
		goto bail;

	/*
	 * Determine number of vectors we want; depends on port count
	 * and number of configured kernel receive queues actually used.
	 * Should also depend on whether sdma is enabled or not, but
	 * that's such a rare testing case it's not worth worrying about.
	 */
	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
	for (i = 0; i < tabsize; i++)
		if ((i < ARRAY_SIZE(irq_table) &&
		     irq_table[i].port <= dd->num_pports) ||
		    (i >= ARRAY_SIZE(irq_table) &&
		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
			actual_cnt++;
	/* reduce by ctxt's < 2 */
	if (qib_krcvq01_no_msi)
		actual_cnt -= dd->num_pports;

	tabsize = actual_cnt;
	dd->cspec->msix_entries = kzalloc(tabsize *
			sizeof(struct qib_msix_entry), GFP_KERNEL);
	if (!dd->cspec->msix_entries) {
		qib_dev_err(dd, "No memory for MSIx table\n");
		tabsize = 0;
	}
	for (i = 0; i < tabsize; i++)
		dd->cspec->msix_entries[i].msix.entry = i;

	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
		qib_dev_err(dd,
			"Failed to setup PCIe or interrupts; continuing anyway\n");
	/* may be less than we wanted, if not enough available */
	dd->cspec->num_msix_entries = tabsize;

	/* setup interrupt handler */
	qib_setup_7322_interrupt(dd, 1);

	/* clear diagctrl register, in case diags were running and crashed */
	qib_write_kreg(dd, kr_hwdiagctrl, 0);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	if (!dca_add_requester(&pdev->dev)) {
		qib_devinfo(dd->pcidev, "DCA enabled\n");
		dd->flags |= QIB_DCA_ENABLED;
		qib_setup_dca(dd);
	}
#endif
	goto bail;

bail_cleanup:
	qib_pcie_ddcleanup(dd);
bail_free:
	qib_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
/*
 * Set the table entry at the specified index from the table specified.
 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
 * 'ridx' below addresses the correct entry, while its 4 LSBs select the
 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
 */
#define DDS_ENT_AMP_LSB 14
#define DDS_ENT_MAIN_LSB 9
#define DDS_ENT_POST_LSB 5
#define DDS_ENT_PRE_XTRA_LSB 3
#define DDS_ENT_PRE_LSB 0
/*
 * Set one entry in the TxDDS table for spec'd port
 * ridx picks one of the entries, while tp points
 * to the appropriate table entry.
 */
static void set_txdds(struct qib_pportdata *ppd, int ridx,
		      const struct txdds_ent *tp)
{
	struct qib_devdata *dd = ppd->dd;
	u32 pack_ent;
	int regidx;

	/* Get correct offset in chip-space, and in source table */
	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
	/*
	 * We do not use qib_write_kreg_port() because it was intended
	 * only for registers in the lower "port specific" pages.
	 * So do index calculation by hand.
	 */
	if (ppd->hw_pidx)
		regidx += (dd->palign / sizeof(u64));

	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
	pack_ent |= tp->post << DDS_ENT_POST_LSB;
	qib_write_kreg(dd, regidx, pack_ent);
	/* Prevent back-to-back writes by hitting scratch */
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}
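
/*
 * Packing example for set_txdds(): the entry { amp 0, pre 1, main 3,
 * post 15 } becomes (0 << 14) | (3 << 9) | (15 << 5) | (1 << 0) = 0x7e1,
 * with each field placed at its DDS_ENT_*_LSB offset in the packed
 * register value.
 */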
static const struct vendor_txdds_ent vendor_txdds[] = {
	{ /* Amphenol 1m 30awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470002 ",
		{ 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
	},
	{ /* Amphenol 3m 28awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470004 ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
	},
	{ /* Finisar 3m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
		{ 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
	},
	{ /* Finisar 30m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
		{ 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
	},
	{ /* Finisar Default OM2 Optical */
		{ 0x00, 0x90, 0x65 }, NULL,
		{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
	},
	{ /* Gore 1m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-1 ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
	},
	{ /* Gore 2m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-2 ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
	},
	{ /* Gore 1m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-1 ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
	},
	{ /* Gore 3m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-3 ",
		{ 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
	},
	{ /* Gore 5m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-5 ",
		{ 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
	},
	{ /* Gore 7m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-7 ",
		{ 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
	},
	{ /* Gore 5m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-5 ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
	},
	{ /* Gore 7m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-7 ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
	},
	{ /* Intersil 12m 24awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
		{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
	},
	{ /* Intersil 10m 28awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
	},
	{ /* Intersil 7m 30awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
	},
	{ /* Intersil 5m 32awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
	},
	{ /* Intersil Default Active */
		{ 0x00, 0x30, 0xB4 }, NULL,
		{ 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
	},
	{ /* Luxtera 20m Active Optical */
		{ 0x00, 0x25, 0x63 }, NULL,
		{ 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
	},
	{ /* Molex 1M Cu loopback */
		{ 0x00, 0x09, 0x3A }, "74763-0025 ",
		{ 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
	},
	{ /* Molex 2m 28awg NoEq */
		{ 0x00, 0x09, 0x3A }, "74757-2201 ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
	},
};
static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 0, 0, 1 },		/* 2 dB */
	{ 0, 0, 0, 2 },		/* 3 dB */
	{ 0, 0, 0, 3 },		/* 4 dB */
	{ 0, 0, 0, 4 },		/* 5 dB */
	{ 0, 0, 0, 5 },		/* 6 dB */
	{ 0, 0, 0, 6 },		/* 7 dB */
	{ 0, 0, 0, 7 },		/* 8 dB */
	{ 0, 0, 0, 8 },		/* 9 dB */
	{ 0, 0, 0, 9 },		/* 10 dB */
	{ 0, 0, 0, 10 },	/* 11 dB */
	{ 0, 0, 0, 11 },	/* 12 dB */
	{ 0, 0, 0, 12 },	/* 13 dB */
	{ 0, 0, 0, 13 },	/* 14 dB */
	{ 0, 0, 0, 14 },	/* 15 dB */
	{ 0, 0, 0, 15 },	/* 16 dB */
};

static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 0, 0, 8 },		/* 2 dB */
	{ 0, 0, 0, 8 },		/* 3 dB */
	{ 0, 0, 0, 9 },		/* 4 dB */
	{ 0, 0, 0, 9 },		/* 5 dB */
	{ 0, 0, 0, 10 },	/* 6 dB */
	{ 0, 0, 0, 10 },	/* 7 dB */
	{ 0, 0, 0, 11 },	/* 8 dB */
	{ 0, 0, 0, 11 },	/* 9 dB */
	{ 0, 0, 0, 12 },	/* 10 dB */
	{ 0, 0, 0, 12 },	/* 11 dB */
	{ 0, 0, 0, 13 },	/* 12 dB */
	{ 0, 0, 0, 13 },	/* 13 dB */
	{ 0, 0, 0, 14 },	/* 14 dB */
	{ 0, 0, 0, 14 },	/* 15 dB */
	{ 0, 0, 0, 15 },	/* 16 dB */
};

static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 1, 0, 7 },		/* 2 dB (also QMH7342) */
	{ 0, 1, 0, 9 },		/* 3 dB (also QMH7342) */
	{ 0, 1, 0, 11 },	/* 4 dB */
	{ 0, 1, 0, 13 },	/* 5 dB */
	{ 0, 1, 0, 15 },	/* 6 dB */
	{ 0, 1, 3, 15 },	/* 7 dB */
	{ 0, 1, 7, 15 },	/* 8 dB */
	{ 0, 1, 7, 15 },	/* 9 dB */
	{ 0, 1, 8, 15 },	/* 10 dB */
	{ 0, 1, 9, 15 },	/* 11 dB */
	{ 0, 1, 10, 15 },	/* 12 dB */
	{ 0, 2, 6, 15 },	/* 13 dB */
	{ 0, 2, 7, 15 },	/* 14 dB */
	{ 0, 2, 8, 15 },	/* 15 dB */
	{ 0, 2, 9, 15 },	/* 16 dB */
};
/*
 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
 * These are mostly used for mez cards going through connectors
 * and backplane traces, but can be used to add other "unusual"
 * table values as well.
 */
static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 1 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 1 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 2 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 2 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 3 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 4 },		/* QMH7342 backplane settings */
	{ 0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 7 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 6 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 8 },		/* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 7 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 7 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 9 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 10 },	/* QMH7342 backplane settings */
	{ 0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 7 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 6 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 8 },		/* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 1, 0, 4 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 5 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 6 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 10 },	/* QMH7342 backplane settings */
	{ 0, 1, 0, 12 },	/* QMH7342 backplane settings */
	{ 0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
	{ 0, 1, 0, 11 },	/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 7 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 9 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 6 },		/* QME7342 backplane settings 1.1 */
	{ 0, 1, 0, 8 },		/* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 0 },		/* QME7342 mfg settings */
	{ 0, 0, 0, 6 },		/* QME7342 P2 mfg settings */
};
static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
					       unsigned atten)
{
	/*
	 * The attenuation table starts at 2dB for entry 1,
	 * with entry 0 being the loopback entry.
	 */
	if (atten <= 2)
		atten = 1;
	else if (atten > TXDDS_TABLE_SZ)
		atten = TXDDS_TABLE_SZ - 1;
	else
		atten--;
	return txdds + atten;
}
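
/*
 * Mapping example: a cable reporting 11 dB of attenuation selects index
 * 10, which the tables above label "11 dB"; anything at or below 2 dB
 * clamps to entry 1 (the 2 dB entry) and anything past the end of the
 * table clamps to the last regular entry, so index 0 stays reserved for
 * loopback.
 */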
/*
 * If override is set, the module parameter txselect has a value
 * for this specific port, so use it, rather than our normal mechanism.
 */
static void find_best_ent(struct qib_pportdata *ppd,
			  const struct txdds_ent **sdr_dds,
			  const struct txdds_ent **ddr_dds,
			  const struct txdds_ent **qdr_dds, int override)
{
	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
	int idx;

	/* Search table of known cables */
	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
		const struct vendor_txdds_ent *v = vendor_txdds + idx;

		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
		    (!v->partnum ||
		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
			*sdr_dds = &v->sdr;
			*ddr_dds = &v->ddr;
			*qdr_dds = &v->qdr;
			return;
		}
	}

	/*
	 * Active cables don't have attenuation so we only set SERDES
	 * settings to account for the attenuation of the board traces.
	 */
	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
		return;
	}

	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
						      qd->atten[1])) {
		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
		return;
	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
		/*
		 * If we have no (or incomplete) data from the cable
		 * EEPROM, or no QSFP, or override is set, use the
		 * module parameter value to index into the attenuation
		 * table.
		 */
		idx = ppd->cpspec->no_eep;
		*sdr_dds = &txdds_sdr[idx];
		*ddr_dds = &txdds_ddr[idx];
		*qdr_dds = &txdds_qdr[idx];
	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
		/* similar to above, but index into the "extra" table. */
		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
		*sdr_dds = &txdds_extra_sdr[idx];
		*ddr_dds = &txdds_extra_ddr[idx];
		*qdr_dds = &txdds_extra_qdr[idx];
	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
					  TXDDS_MFG_SZ)) {
		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
			ppd->dd->unit, ppd->port, idx);
		*sdr_dds = &txdds_extra_mfg[idx];
		*ddr_dds = &txdds_extra_mfg[idx];
		*qdr_dds = &txdds_extra_mfg[idx];
	} else {
		/* this shouldn't happen, it's range checked */
		*sdr_dds = txdds_sdr + qib_long_atten;
		*ddr_dds = txdds_ddr + qib_long_atten;
		*qdr_dds = txdds_qdr + qib_long_atten;
	}
}
static void init_txdds_table(struct qib_pportdata *ppd, int override)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;
	int idx;
	int single_ent = 0;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);

	/* for mez cards or override, use the selected value for all entries */
	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
		single_ent = 1;

	/* Fill in the first entry with the best entry found. */
	set_txdds(ppd, 0, sdr_dds);
	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
			   QIBL_LINKACTIVE)) {
		dds = (struct txdds_ent *)(ppd->link_speed_active ==
					   QIB_IB_QDR ? qdr_dds :
					   (ppd->link_speed_active ==
					    QIB_IB_DDR ? ddr_dds : sdr_dds));
		write_tx_serdes_param(ppd, dds);
	}

	/* Fill in the remaining entries with the default table values. */
	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
			  single_ent ? ddr_dds : txdds_ddr + idx);
		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
			  single_ent ? qdr_dds : txdds_qdr + idx);
	}
}
#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
#define AHB_TRANS_TRIES 10

/*
 * The chan argument is 0=chan1, 1=chan2, 2=pll, 3=chan3, 4=chan4,
 * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
 * for the channel argument.
 */
static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
		   u32 data, u32 mask)
{
	u32 rd_data, wr_data, sz_mask;
	u64 trans, acc, prev_acc;
	u32 ret = 0xBAD0BAD;
	int tries;

	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
	/* From this point on, make sure we return access */
	acc = (quad << 1) | 1;
	qib_write_kreg(dd, KR_AHB_ACC, acc);

	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		if (trans & AHB_TRANS_RDY)
			break;
	}
	if (tries >= AHB_TRANS_TRIES) {
		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
		goto bail;
	}

	/*
	 * If mask is not all 1s, we need to read, but different SerDes
	 * entities have different sizes
	 */
	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
	wr_data = data & mask & sz_mask;
	if ((~mask & sz_mask) != 0) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
		/* Re-read in case host split reads and read data first */
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
		wr_data |= (rd_data & ~mask & sz_mask);
	}

	/* If mask is not zero, we need to write. */
	if (mask & sz_mask) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
		trans |= AHB_WR;
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
	}
	ret = wr_data;
bail:
	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
	return ret;
}
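
/*
 * ahb_mod() is a read-modify-write primitive: to change only bits
 * [14:11] of a 16-bit SerDes register, call it with mask BMASK(14, 11)
 * and the new bits already shifted into place in data; it reads the old
 * value, merges the unmasked bits, and writes the result back. A mask of
 * 0 turns it into a pure read (the old value is returned), and an
 * all-ones mask skips the read entirely.
 */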
static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
			     unsigned mask)
{
	struct qib_devdata *dd = ppd->dd;
	int chan;
	u32 rbc;

	for (chan = 0; chan < SERDES_CHANS; ++chan) {
		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
			data, mask);
		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			      addr, 0, 0);
	}
}
static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);

	if (enable && !state) {
		pr_info("IB%u:%u Turning LOS on\n",
			ppd->dd->unit, ppd->port);
		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	} else if (!enable && state) {
		pr_info("IB%u:%u Turning LOS off\n",
			ppd->dd->unit, ppd->port);
		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	}
	qib_write_kreg_port(ppd, krp_serdesctrl, data);
}
static int serdes_7322_init(struct qib_pportdata *ppd)
{
	int ret = 0;

	if (ppd->dd->cspec->r1)
		ret = serdes_7322_init_old(ppd);
	else
		ret = serdes_7322_init_new(ppd);
	return ret;
}
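
/*
 * Early-revision (r1) silicon keeps the original bringup sequence in
 * serdes_7322_init_old(); later chip revisions use the LSI-suggested
 * sequence in serdes_7322_init_new().
 */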
static int serdes_7322_init_old(struct qib_pportdata *ppd)
{
	u32 le_val;

	/*
	 * Initialize the Tx DDS tables. Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	/* Patch some SerDes defaults to "Better for IB" */
	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));

	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));

	/* May be overridden in qsfp_7322_event */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));

	/* enable LE1 adaptation for all but QME, which is disabled */
	le_val = IS_QME(ppd->dd) ? 0 : 1;
	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data: SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	serdes_7322_los_enable(ppd, 1);

	/* rxbistena; set to 0 to avoid effects if it switches later */
	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);

	/* Configure 4 DFE taps, and only they adapt */
	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));

	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);

	/*
	 * Set receive adaptation mode. SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;

	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
	/* rx offset center enabled */
	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);

	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
	}

	/* Set the frequency loop bandwidth to 15 */
	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));

	return 0;
}
static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
	unsigned long tend;
	u32 le_val, rxcaldone;
	int chan, chan_done = (1 << SERDES_CHANS) - 1;

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	/* START OF LSI SUGGESTED SERDES BRINGUP */
	/* Reset - Calibration Setup */
	/* Stop DFE adaptation */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
	/* Disable LE1 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
	/* Disable autoadapt for LE1 */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
	/* Disable LE2 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
	/* Disable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	/* Disable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
	/* Disable Timing Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
	/* Disable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
	/* Disable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
	/* Disable RX Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	/* Disable RX Offset Calibration */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
	/* Select BB CDR */
	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
	/* CDR Step Size */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
	/* Enable phase Calibration */
	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
	/* DFE Bandwidth [2:14-12] */
	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
	/* DFE Config (4 taps only) */
	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
	/* Gain Loop Bandwidth */
	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
	} else {
		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
	}
	/* Baseline Wander Correction Gain [13:4-0] (leave as default) */
	/* Baseline Wander Correction Gain [3:7-5] (leave as default) */
	/* Data Rate Select [5:7-6] (leave as default) */
	/* RX Parallel Word Width [3:10-8] (leave as default) */

	/* RX RESET */
	/* Single- or Multi-channel reset */
	/* RX Analog reset */
	/* RX Digital reset */
	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
	msleep(20);
	/* RX Analog reset */
	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
	msleep(20);
	/* RX Digital reset */
	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
	msleep(20);
	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data: SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	/* Turn on LOS on initial SERDES init */
	serdes_7322_los_enable(ppd, 1);
	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);

	/* RX LATCH CALIBRATION */
	/* Enable Eyefinder Phase Calibration latch */
	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
	/* Enable RX Offset Calibration latch */
	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
	msleep(20);
	/* Start Calibration */
	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
	tend = jiffies + msecs_to_jiffies(500);
	while (chan_done && !time_is_before_jiffies(tend)) {
		msleep(20);
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
			    (~chan_done & (1 << chan)) == 0)
				chan_done &= ~(1 << chan);
		}
	}
	if (chan_done) {
		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
			IBSD(ppd->hw_pidx), chan_done);
	} else {
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
				pr_info("Serdes %d chan %d calibration failed\n",
					IBSD(ppd->hw_pidx), chan);
		}
	}
	/* Turn off Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	msleep(20);

	/* BRING RX UP */
	/* Set LE2 value (May be overridden in qsfp_7322_event) */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
	/* Set LE2 Loop bandwidth */
	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
	/* Enable LE2 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
	msleep(20);
	/* Enable H0 only */
	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
	/* Enable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	msleep(20);
	/* Set Frequency Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
	/* Enable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
	/* Set Timing Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
	/* Enable Timing Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
	msleep(50);
	/*
	 * Enable DFE.
	 * Set receive adaptation mode. SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;
	/* Disable LE1 */
	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
	/* Disable auto adapt for LE1 */
	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
	msleep(20);
	/* Enable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
	/* Enable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* VGA output common mode */
	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));

	/*
	 * Initialize the Tx DDS tables. Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	return 0;
}
/* start adjust QMH serdes parameters */

static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
{
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		9, code << 9, 0x3f << 9);
}

static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
			    int enable, u32 tapenable)
{
	if (enable)
		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			1, 3 << 10, 0x1f << 10);
	else
		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			1, 0, 0x1f << 10);
}

/* Set clock to 1, 0, 1, 0 */
static void clock_man(struct qib_pportdata *ppd, int chan)
{
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0x4000, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0x4000, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0, 0x4000);
}
/*
 * Write the current Tx serdes pre, post, main, amp settings into the
 * serdes. The caller must pass the settings appropriate for the current
 * speed, or not care if they are correct for the current speed.
 */
static void write_tx_serdes_param(struct qib_pportdata *ppd,
				  struct txdds_ent *txdds)
{
	u64 deemph;

	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
	/* field names for amp, main, post, pre, respectively */
	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));

	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
			   tx_override_deemphasis_select);
	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					      txampcntl_d2a);
	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					 txc0_ena);
	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					  txcp1_ena);
	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					  txcn1_ena);
	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
}
/*
 * Set the parameters for mez cards on link bounce, so they are
 * always exactly what was requested. Similar logic to init_txdds
 * but does just the serdes.
 */
static void adj_tx_serdes(struct qib_pportdata *ppd)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
			   ddr_dds : sdr_dds));
	write_tx_serdes_param(ppd, dds);
}

/* set QDR forced value for H1, if needed */
static void force_h1(struct qib_pportdata *ppd)
{
	int chan;

	ppd->cpspec->qdr_reforce = 0;
	if (!ppd->dd->cspec->r1)
		return;

	for (chan = 0; chan < SERDES_CHANS; chan++) {
		set_man_mode_h1(ppd, chan, 1, 0);
		set_man_code(ppd, chan, ppd->cpspec->h1_val);
		clock_man(ppd, chan);
		set_man_mode_h1(ppd, chan, 0, 0);
	}
}
#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)

#define R_OPCODE_LSB 3
#define R_OP_NOP 0
#define R_OP_SHIFT 2
#define R_OP_UPDATE 3
#define R_TDI_LSB 2
#define R_TDO_LSB 1
#define R_RDY 1

static int qib_r_grab(struct qib_devdata *dd)
{
	u64 val = SJA_EN;

	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	return 0;
}
/*
 * qib_r_wait_for_rdy() not only waits for the ready bit, it
 * returns the current state of R_TDO
 */
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
	u64 val;
	int timeout;

	for (timeout = 0; timeout < 100; ++timeout) {
		val = qib_read_kreg32(dd, kr_r_access);
		if (val & R_RDY)
			return (val >> R_TDO_LSB) & 1;
	}
	return -1;
}
static int qib_r_shift(struct qib_devdata *dd, int bisten,
		       int len, u8 *inp, u8 *outp)
{
	u64 valbase, val;
	int ret, pos;

	valbase = SJA_EN | (bisten << BISTEN_LSB) |
		(R_OP_SHIFT << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret < 0)
		goto bail;
	for (pos = 0; pos < len; ++pos) {
		val = valbase;
		if (outp) {
			/* capture the TDO bit from the previous cycle */
			outp[pos >> 3] &= ~(1 << (pos & 7));
			outp[pos >> 3] |= (ret << (pos & 7));
		}
		if (inp) {
			int tdi = inp[pos >> 3] >> (pos & 7);

			val |= ((tdi & 1) << R_TDI_LSB);
		}
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
		ret = qib_r_wait_for_rdy(dd);
		if (ret < 0)
			break;
	}
	/* Restore to NOP between operations. */
	val = SJA_EN | (bisten << BISTEN_LSB);
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	ret = qib_r_wait_for_rdy(dd);
	if (ret >= 0)
		ret = pos;
bail:
	return ret;
}
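
/*
 * Bit-order note: qib_r_shift() streams bit N of the buffer from
 * inp[N >> 3] bit (N & 7), i.e. LSB-first within each byte, and stores
 * captured TDO bits to outp[] in the same layout.  set_stream_bit() is a
 * hypothetical helper (unused here) for building buffers in that layout.
 */
static inline void set_stream_bit(u8 *buf, int pos, int bit)
{
	buf[pos >> 3] &= ~(1 << (pos & 7));	 /* clear the slot */
	buf[pos >> 3] |= (bit & 1) << (pos & 7); /* set the new bit */
}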
static int qib_r_update(struct qib_devdata *dd, int bisten)
{
	u64 val;
	int ret;

	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret >= 0) {
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	return ret;
}
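
/*
 * Illustrative sketch (hypothetical, unused): programming a scan chain is
 * always the same grab -> shift -> update sequence;
 * setup_7322_link_recovery() below chains several of these for the real
 * recovery images.
 */
static inline int example_load_chain(struct qib_devdata *dd, int bisten,
				     int len, u8 *pattern)
{
	if (qib_r_grab(dd) < 0 ||
	    qib_r_shift(dd, bisten, len, pattern, NULL) < 0 ||
	    qib_r_update(dd, bisten) < 0)
		return -1;	/* ready bit never came back */
	return 0;
}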
#define BISTEN_PORT_SEL 15
#define LEN_PORT_SEL 625
#define BISTEN_AT 17
#define LEN_AT 156
#define BISTEN_ETM 16
#define LEN_ETM 632

#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
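
/*
 * Worked example: BIT2BYTE(LEN_AT) = (156 + 7) / 8 = 20, the size of the
 * 20-byte AT images below; LEN_ETM (632) and LEN_PORT_SEL (625) both
 * round up to 79 bytes.
 */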
/* these are common for all IB port use cases. */
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
};

static u8 at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

/* used for IB1 or IB2, only one in use */
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
};

/* used when both IB1 and IB2 are in use */
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
};

/* used when only IB1 is in use */
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* used when only IB2 is in use */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};

/* used when both IB1 and IB2 are in use */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};
/*
 * Do setup to properly handle IB link recovery; if "both" is set, we
 * are initializing to cover both ports; otherwise we are initializing
 * to cover a single-port card, or the port has reached INIT and we may
 * need to switch coverage types.
 */
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
{
	u8 *portsel, *etm;
	struct qib_devdata *dd = ppd->dd;

	if (!ppd->dd->cspec->r1)
		return;
	if (!both) {
		dd->cspec->recovery_ports_initted++;
		ppd->cpspec->recovery_init = 1;
	}
	if (!both && dd->cspec->recovery_ports_initted == 1) {
		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
		etm = atetm_1port;
	} else {
		portsel = portsel_2port;
		etm = atetm_2port;
	}

	if (qib_r_grab(dd) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
			portsel, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0)
		qib_dev_err(dd, "Failed IB link recovery setup\n");
}
static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 fmask;

	if (dd->cspec->recovery_ports_initted != 1)
		return; /* rest doesn't apply to dualport */
	qib_write_kreg(dd, kr_control, dd->control |
		       SYM_MASK(Control, FreezeMode));
	(void)qib_read_kreg64(dd, kr_scratch);
	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask) {
		/*
		 * The chip requires a power cycle before it will work
		 * again; make sure we get no more interrupts, and stay
		 * in freeze mode.
		 */
		ppd->dd->cspec->stay_in_freeze = 1;
		qib_7322_set_intr_state(ppd->dd, 0);
		qib_write_kreg(dd, kr_fmask, 0ULL);
		qib_dev_err(dd, "HCA unusable until powercycled\n");
		return; /* eventually reset */
	}

	qib_write_kreg(ppd->dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

	/* don't do the full clear_freeze(), not needed for this */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);
	/* take IBC out of reset */
	if (ppd->link_speed_supported) {
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_read_kreg32(dd, kr_scratch);
		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	}
}