qib_iba7322.c
/*
 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
                                  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
                                   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
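/* e.g. BMASK(7, 4) == 0xf0: a contiguous mask covering bits 4 through 7 */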

/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)
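/* e.g. IBSD(0) == 2, IBSD(1) == 3; presumably the AHB chain/target numbers
 * used by ahb_mod() and ibsd_wr_allchans() for each port's IB SerDes (an
 * inference from the definition, not from chip documentation).
 */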

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation, \
                 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
        .string = txselect_list,
        .maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect, \
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QME7342)

#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))

#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
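/* The generated regs header provides byte offsets; dividing by sizeof(u64)
 * yields an index into the chip's array of 64-bit kregs (e.g. byte offset
 * 0x100 becomes kreg index 0x20). KREG_IBPORT_IDX uses the port-0 instance
 * of a per-port register as its base.
 */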
#define MASK_ACROSS(lsb, msb) \
        (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
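/* 64-bit variant of BMASK with the (lsb, msb) argument order reversed;
 * e.g. MASK_ACROSS(0, 3) == 0xfULL.
 */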
#define SYM_RMASK(regname, fldname) ((u64)              \
        QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64)               \
        QIB_7322_##regname##_##fldname##_RMASK <<       \
         QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64)       \
        (((value) >> SYM_LSB(regname, fldname)) &       \
         SYM_RMASK(regname, fldname)))
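/* i.e. RMASK is the field mask right-justified at bit 0, SYM_MASK shifts it
 * into position, and SYM_FIELD extracts a field from a register value, e.g.
 * SYM_FIELD((dd)->revision, Revision, BoardID) as used by IS_QMH/IS_QME above.
 */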
/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
        (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)

#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
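/* A TID entry thus presumably packs size-code | (physaddr >> 11); the low 11
 * address bits are implied zero since buffers are at least 2KB aligned (an
 * assumption from these definitions; the packing itself happens elsewhere).
 */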
#define SendIBSLIDAssignMask \
        QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
        QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */
/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
/* these are the error bits in the tid flows, and are write-1-to-clear (W1C) */
#define TIDFLOW_ERRBITS ( \
        (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
        (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
        ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
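/* Counter indices are relative to LBIntCnt (the first counter in the group)
 * rather than to the start of kreg space, since the counters are read through
 * their own bases (cregbase / cpregbase below).
 */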
#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
                        QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
        [IB_RATE_2_5_GBPS] = 16,
        [IB_RATE_5_GBPS] = 8,
        [IB_RATE_10_GBPS] = 4,
        [IB_RATE_20_GBPS] = 2,
        [IB_RATE_30_GBPS] = 2,
        [IB_RATE_40_GBPS] = 1
};
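/* Delay values scale roughly inversely with link rate, normalized so that
 * 40 Gbps == 1 (2.5 Gbps is 16x slower, 5 Gbps 8x, and so on).
 */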
#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED        0x00
#define IB_7322_LT_STATE_LINKUP          0x01
#define IB_7322_LT_STATE_POLLACTIVE      0x02
#define IB_7322_LT_STATE_POLLQUIET       0x03
#define IB_7322_LT_STATE_SLEEPDELAY      0x04
#define IB_7322_LT_STATE_SLEEPQUIET      0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
#define IB_7322_LT_STATE_CFGIDLE         0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7322_LT_STATE_TXREVLANES      0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
#define IB_7322_LT_STATE_CFGENH          0x10
#define IB_7322_LT_STATE_CFGTEST         0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
#define IB_7322_LT_STATE_CFGWAITENH      0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN             0x0
#define IB_7322_L_STATE_INIT             0x1
#define IB_7322_L_STATE_ARM              0x2
#define IB_7322_L_STATE_ACTIVE           0x3
#define IB_7322_L_STATE_ACT_DEFER        0x4

static const u8 qib_7322_physportstate[0x20] = {
        [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
        [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
        [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGRCVFCFG] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMT] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
        [IB_7322_LT_STATE_RECOVERRETRAIN] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERWAITRMT] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERIDLE] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
        [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMTTEST] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITENH] =
                IB_PHYSPORTSTATE_CFG_WAIT_ENH,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
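/* Indices not listed (0x06-0x07, 0x18-0x1f) default to zero under C's
 * designated-initializer rules; 0x14-0x17 are mapped explicitly to CFG_TRAIN,
 * presumably IBC encodings with no distinct IBTA physical port state.
 */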
struct qib_chip_specific {
        u64 __iomem *cregbase;
        u64 *cntrs;
        spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
        spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
        u64 main_int_mask; /* clear bits which have dedicated handlers */
        u64 int_enable_mask; /* for per port interrupts in single port mode */
        u64 errormask;
        u64 hwerrmask;
        u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
        u64 gpio_mask; /* shadow the gpio mask register */
        u64 extctrl; /* shadow the gpio output enable, etc... */
        u32 ncntrs;
        u32 nportcntrs;
        u32 cntrnamelen;
        u32 portcntrnamelen;
        u32 numctxts;
        u32 rcvegrcnt;
        u32 updthresh; /* current AvailUpdThld */
        u32 updthresh_dflt; /* default AvailUpdThld */
        u32 r1;
        int irq;
        u32 num_msix_entries;
        u32 sdmabufcnt;
        u32 lastbuf_for_pio;
        u32 stay_in_freeze;
        u32 recovery_ports_initted;
        struct msix_entry *msix_entries;
        void **msix_arg;
        unsigned long *sendchkenable;
        unsigned long *sendgrhchk;
        unsigned long *sendibchk;
        u32 rcvavail_timeout[18];
        char emsgbuf[128]; /* for device error interrupt msg buffer */
};
/* Table of Tx Emphasis settings entries, in "human readable" form. */
  495. struct txdds_ent {
  496. u8 amp;
  497. u8 pre;
  498. u8 main;
  499. u8 post;
  500. };
/* Tx settings for a particular QSFP vendor/part, one txdds_ent per link speed */
struct vendor_txdds_ent {
        u8 oui[QSFP_VOUI_LEN];
        u8 *partnum;
        struct txdds_ent sdr;
        struct txdds_ent ddr;
        struct txdds_ent qdr;
};
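/*
 * Program the Tx serdes drive settings (amp/pre/main/post) for one port;
 * forward declaration, the definition appears later in this file.
 */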
static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
        + ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
struct qib_chippport_specific {
        u64 __iomem *kpregbase;
        u64 __iomem *cpregbase;
        u64 *portcntrs;
        struct qib_pportdata *ppd;
        wait_queue_head_t autoneg_wait;
        struct delayed_work autoneg_work;
        struct delayed_work ipg_work;
        struct timer_list chase_timer;
        /*
         * these 5 fields are used to establish deltas for IB symbol
         * errors and linkrecovery errors.  They can be reported on
         * some chips during link negotiation prior to INIT, and with
         * DDR when faking DDR negotiations with non-IBTA switches.
         * The chip counters are adjusted at driver unload if there is
         * a non-zero delta.
         */
        u64 ibdeltainprog;
        u64 ibsymdelta;
        u64 ibsymsnap;
        u64 iblnkerrdelta;
        u64 iblnkerrsnap;
        u64 iblnkdownsnap;
        u64 iblnkdowndelta;
        u64 ibmalfdelta;
        u64 ibmalfsnap;
        u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
        u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
        u64 qdr_dfe_time;
        u64 chase_end;
        u32 autoneg_tries;
        u32 recovery_init;
        u32 qdr_dfe_on;
        u32 qdr_reforce;
        /*
         * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
         * entry zero is unused, to simplify indexing
         */
        u8 h1_val;
        u8 no_eep; /* txselect table index to use if no qsfp info */
        u8 ipg_tries;
        u8 ibmalfusesnap;
        struct qib_qsfp_data qsfp_data;
        char epmsgbuf[192]; /* for port error interrupt msg buffer */
};
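/*
 * Map of interrupt sources to names and handlers, used when registering
 * MSIx vectors: lsb is the source's bit position in IntStatus, and port
 * is 0 for interrupts that are not port-specific.
 */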
static struct {
        const char *name;
        irq_handler_t handler;
        int lsb;
        int port; /* 0 if not port-specific, else port # */
} irq_table[] = {
        { QIB_DRV_NAME, qib_7322intr, -1, 0 },
        { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
                SYM_LSB(IntStatus, SendBufAvail), 0 },
        { QIB_DRV_NAME " (sdma 0)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_0), 1 },
        { QIB_DRV_NAME " (sdma 1)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_1), 2 },
        { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
        { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
        { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
        { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
        { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
        { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
};
/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101
static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 if the chip is not present or not mapped (not distinguishable
 * from valid contents at runtime; we may add a separate error variable at
 * some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
                                  enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}
/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 if the chip is not present or not mapped (not distinguishable
 * from valid contents at runtime; we may add a separate error variable at
 * some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
                                enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}
/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
{
        u64 __iomem *ubase;

        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                         dd->ureg_align * ctxt);
        else
                ubase = (u64 __iomem *)
                        (dd->uregbase +
                         (char __iomem *) dd->kregbase +
                         dd->ureg_align * ctxt);

        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &ubase[regno]);
}
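/*
 * Kernel (chip) register accessors; reads return -1 (all ones) and writes
 * are dropped if the chip is absent or unmapped.
 */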
static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value)
{
        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &dd->kregbase[regno]);
}
/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
                                     const u16 regno)
{
        if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
                return 0ULL;
        return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
                                       const u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
                                       const u16 regno, unsigned ctxt,
                                       u64 value)
{
        qib_write_kreg(dd, regno + ctxt, value);
}
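/*
 * Counter ("creg") register accessors, device-wide and per-port; all of
 * them return 0 if the chip is absent or the counter space is unmapped.
 */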
static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
                                        u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->cpspec->cpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
                                      u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
                                        u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readl(&ppd->cpspec->cpregbase[regno]);
}
/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
        (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
         INT_MASK_P(SDmaProgress, pidx) | \
         INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
        (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
         INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
         INT_MASK_P(SDmaProgress, pidx) | \
         INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
        (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
         QIB_I_SPIOSENT | \
         QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
        QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
/*
 * Per chip (rather than per-port) errors.  Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or indicate errors with no recovery, where we still
 * want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/*
 * SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
/*
 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
        QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
        QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
        QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
        QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
        QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
        QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
        QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
        QIB_E_P_SUNEXP_PKTNUM |\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMAXPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
        QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
        ERR_MASK_N(SendUnsupportedVLErr) | \
        QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
        QIB_E_P_SDMAHALT | \
        QIB_E_P_SDMADESCADDRMISALIGN | \
        QIB_E_P_SDMAUNEXPDATA | \
        QIB_E_P_SDMAMISSINGDW | \
        QIB_E_P_SDMADWEN | \
        QIB_E_P_SDMARPYTAG | \
        QIB_E_P_SDMA1STDESC | \
        QIB_E_P_SDMABASE | \
        QIB_E_P_SDMATAILOUTOFBOUND | \
        QIB_E_P_SDMAOUTOFBOUND | \
        QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
        QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
        QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
        QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
        )
/*
 * These are errors that can occur when the link changes state while a
 * packet is being sent or received.  This doesn't cover things like EBP
 * or VCRC that can result from the link changing state during a send,
 * where we receive a "known bad" packet.  All of these are "per port",
 * so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
        QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
        QIB_E_P_RUNEXPCHAR)
/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
        QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
        QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
        QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise neuter E_SPKT_ERRS_IGNORE */
#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
        SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
        SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
        ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled).  It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
        SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
        SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
        SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
        .msg = #fldname , .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
        fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
        HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
        HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
        HWE_AUTO(PCIESerdesPClkNotDetect),
        HWE_AUTO(PowerOnBISTFailed),
        HWE_AUTO(TempsenseTholdReached),
        HWE_AUTO(MemoryErr),
        HWE_AUTO(PCIeBusParityErr),
        HWE_AUTO(PcieCplTimeout),
        HWE_AUTO(PciePoisonedTLP),
        HWE_AUTO_P(SDmaMemReadErr, 1),
        HWE_AUTO_P(SDmaMemReadErr, 0),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
        HWE_AUTO_P(IBCBusToSPCParityErr, 1),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
        HWE_AUTO(statusValidNoEop),
        HWE_AUTO(LATriggered),
        { .mask = 0, .sz = 0 }
};
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
        .msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
        .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
        E_AUTO(RcvEgrFullErr),
        E_AUTO(RcvHdrFullErr),
        E_AUTO(ResetNegated),
        E_AUTO(HardwareErr),
        E_AUTO(InvalidAddrErr),
        E_AUTO(SDmaVL15Err),
        E_AUTO(SBufVL15MisUseErr),
        E_AUTO(InvalidEEPCmd),
        E_AUTO(RcvContextShareErr),
        E_AUTO(SendVLMismatchErr),
        E_AUTO(SendArmLaunchErr),
        E_AUTO(SendSpecialTriggerErr),
        E_AUTO(SDmaWrongPortErr),
        E_AUTO(SDmaBufMaskDuplicateErr),
        { .mask = 0, .sz = 0 }
};
static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
        E_P_AUTO(IBStatusChanged),
        E_P_AUTO(SHeadersErr),
        E_P_AUTO(VL15BufMisuseErr),
        /*
         * SDmaHaltErr is not really an error, so report it as
         * "SDmaHalted" to make that clearer.
         */
        { .mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
          .sz = 11 },
        E_P_AUTO(SDmaDescAddrMisalignErr),
        E_P_AUTO(SDmaUnexpDataErr),
        E_P_AUTO(SDmaMissingDwErr),
        E_P_AUTO(SDmaDwEnErr),
        E_P_AUTO(SDmaRpyTagErr),
        E_P_AUTO(SDma1stDescErr),
        E_P_AUTO(SDmaBaseErr),
        E_P_AUTO(SDmaTailOutOfBoundErr),
        E_P_AUTO(SDmaOutOfBoundErr),
        E_P_AUTO(SDmaGenMismatchErr),
        E_P_AUTO(SendBufMisuseErr),
        E_P_AUTO(SendUnsupportedVLErr),
        E_P_AUTO(SendUnexpectedPktNumErr),
        E_P_AUTO(SendDroppedDataPktErr),
        E_P_AUTO(SendDroppedSmpPktErr),
        E_P_AUTO(SendPktLenErr),
        E_P_AUTO(SendUnderRunErr),
        E_P_AUTO(SendMaxPktLenErr),
        E_P_AUTO(SendMinPktLenErr),
        E_P_AUTO(RcvIBLostLinkErr),
        E_P_AUTO(RcvHdrErr),
        E_P_AUTO(RcvHdrLenErr),
        E_P_AUTO(RcvBadTidErr),
        E_P_AUTO(RcvBadVersionErr),
        E_P_AUTO(RcvIBFlowErr),
        E_P_AUTO(RcvEBPErr),
        E_P_AUTO(RcvUnsupportedVLErr),
        E_P_AUTO(RcvUnexpectedCharErr),
        E_P_AUTO(RcvShortPktLenErr),
        E_P_AUTO(RcvLongPktLenErr),
        E_P_AUTO(RcvMaxPktLenErr),
        E_P_AUTO(RcvMinPktLenErr),
        E_P_AUTO(RcvICRCErr),
        E_P_AUTO(RcvVCRCErr),
        E_P_AUTO(RcvFormatErr),
        { .mask = 0, .sz = 0 }
};
/*
 * Below generates "auto-message" for interrupts not specific to any port or
 * context
 */
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
        .msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
        SYM_LSB(IntMask, fldname##Mask##_0), \
        SYM_LSB(IntMask, fldname##Mask##_1)), \
        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
        SYM_LSB(IntMask, fldname##Mask##_1), \
        SYM_LSB(IntMask, fldname##Mask##_0)), \
        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
 * Below generates "auto-message" for interrupts specific to a context,
 * with ctxt-number appended
 */
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
        SYM_LSB(IntMask, fldname##0IntMask), \
        SYM_LSB(IntMask, fldname##17IntMask)), \
        .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
        INTR_AUTO_P(SDmaInt),
        INTR_AUTO_P(SDmaProgressInt),
        INTR_AUTO_P(SDmaIdleInt),
        INTR_AUTO_P(SDmaCleanupDone),
        INTR_AUTO_C(RcvUrg),
        INTR_AUTO_P(ErrInt),
        INTR_AUTO(ErrInt), /* non-port-specific errs */
        INTR_AUTO(AssertGPIOInt),
        INTR_AUTO_P(SendDoneInt),
        INTR_AUTO(SendBufAvailInt),
        INTR_AUTO_C(RcvAvail),
        { .mask = 0, .sz = 0 }
};

#define TXSYMPTOM_AUTO_P(fldname) \
        { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
          .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
        TXSYMPTOM_AUTO_P(NonKeyPacket),
        TXSYMPTOM_AUTO_P(GRHFail),
        TXSYMPTOM_AUTO_P(PkeyFail),
        TXSYMPTOM_AUTO_P(QPFail),
        TXSYMPTOM_AUTO_P(SLIDFail),
        TXSYMPTOM_AUTO_P(RawIPV6),
        TXSYMPTOM_AUTO_P(PacketTooSmall),
        { .mask = 0, .sz = 0 }
};
#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer so it can be re-used;
 * we don't need to force an update of pioavail here.
 */
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        u32 i;
        int any;
        u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
        u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
        unsigned long sbuf[4];

        /*
         * It's possible that sendbuffererror could have bits set; might
         * have already done this as a result of hardware error handling.
         */
        any = 0;
        for (i = 0; i < regcnt; ++i) {
                sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
                if (sbuf[i]) {
                        any = 1;
                        qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
                }
        }

        if (any)
                qib_disarm_piobufs_set(dd, sbuf, piobcnt);
}
/* No txe_recover yet, if ever */

/* No decode__errors yet */
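/*
 * Decode the error bits in errs into a comma-separated string in msg,
 * using the mask/name table msp; for multi-bit masks the bit index within
 * the mask is appended (e.g. "ErrName_3"), and any bits not covered by
 * the table are emitted as a trailing "MORE:<hex>".  A typical call is
 * (sketch, matching the use in handle_7322_errors() below):
 *
 *      err_decode(msg, sizeof(dd->cspec->emsgbuf), errs,
 *                 qib_7322error_msgs);
 */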
static void err_decode(char *msg, size_t len, u64 errs,
                       const struct qib_hwerror_msgs *msp)
{
        u64 these, lmask;
        int took, multi, n = 0;

        while (errs && msp && msp->mask) {
                multi = (msp->mask & (msp->mask - 1));
                while (errs & msp->mask) {
                        these = (errs & msp->mask);
                        lmask = (these & (these - 1)) ^ these;
                        if (len) {
                                if (n++) {
                                        /* separate the strings */
                                        *msg++ = ',';
                                        len--;
                                }
                                BUG_ON(!msp->sz);
                                /* msp->sz counts the nul */
                                took = min_t(size_t, msp->sz - (size_t)1, len);
                                memcpy(msg, msp->msg, took);
                                len -= took;
                                msg += took;
                                if (len)
                                        *msg = '\0';
                        }
                        errs &= ~lmask;
                        if (len && multi) {
                                /* More than one bit this mask */
                                int idx = -1;

                                while (lmask & msp->mask) {
                                        ++idx;
                                        lmask >>= 1;
                                }
                                took = scnprintf(msg, len, "_%d", idx);
                                len -= took;
                                msg += took;
                        }
                }
                ++msp;
        }
        /* If some bits are left, show in hex. */
        if (len && errs)
                snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
                         (unsigned long long) errs);
}
/* only called if r1 set */
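/*
 * Flush the Tx launch FIFO by "sending" a dummy VL15 packet; nothing
 * actually goes on the wire because the TxeBypassIbc bit is set by the
 * caller, so this is effectively a rev1 hardware workaround for draining.
 */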
static void flush_fifo(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        u32 __iomem *piobuf;
        u32 bufn;
        u32 *hdr;
        u64 pbc;
        const unsigned hdrwords = 7;
        static struct qib_ib_header ibhdr = {
                .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
                .lrh[1] = IB_LID_PERMISSIVE,
                .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
                .lrh[3] = IB_LID_PERMISSIVE,
                .u.oth.bth[0] = cpu_to_be32(
                        (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
                .u.oth.bth[1] = cpu_to_be32(0),
                .u.oth.bth[2] = cpu_to_be32(0),
                .u.oth.u.ud.deth[0] = cpu_to_be32(0),
                .u.oth.u.ud.deth[1] = cpu_to_be32(0),
        };

        /*
         * Send a dummy VL15 packet to flush the launch FIFO.
         * This will not actually be sent since the TxeBypassIbc bit is set.
         */
        pbc = PBC_7322_VL15_SEND |
              (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
              (hdrwords + SIZE_OF_CRC);
        piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
        if (!piobuf)
                return;
        writeq(pbc, piobuf);
        hdr = (u32 *) &ibhdr;
        if (dd->flags & QIB_PIO_FLUSH_WC) {
                qib_flush_wc();
                qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
                qib_flush_wc();
                __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
                qib_flush_wc();
        } else
                qib_pio_copy(piobuf + 2, hdr, hdrwords);
        qib_sendbuf_done(dd, bufn);
}
/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
        struct qib_devdata *dd = ppd->dd;
        u64 set_sendctrl = 0;
        u64 clr_sendctrl = 0;

        if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);

        if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);

        if (op & QIB_SDMA_SENDCTRL_OP_HALT)
                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);

        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
                set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
                        SYM_MASK(SendCtrl_0, TxeAbortIbc) |
                        SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
                        SYM_MASK(SendCtrl_0, TxeAbortIbc) |
                        SYM_MASK(SendCtrl_0, TxeDrainRmFifo);

        spin_lock(&dd->sendctrl_lock);

        /* If we are draining everything, block sends first */
        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
                qib_write_kreg(dd, kr_scratch, 0);
        }

        ppd->p_sendctrl |= set_sendctrl;
        ppd->p_sendctrl &= ~clr_sendctrl;

        if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
                qib_write_kreg_port(ppd, krp_sendctrl,
                                    ppd->p_sendctrl |
                                    SYM_MASK(SendCtrl_0, SDmaCleanup));
        else
                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
        qib_write_kreg(dd, kr_scratch, 0);

        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
                qib_write_kreg(dd, kr_scratch, 0);
        }

        spin_unlock(&dd->sendctrl_lock);

        if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
                flush_fifo(ppd);
}
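/* The hardware has finished SDMA cleanup; advance the SDMA state machine. */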
static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
        __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}

static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
        /*
         * Set SendDmaLenGen and clear and set
         * the MSB of the generation count to enable generation checking
         * and load the internal generation counter.
         */
        qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
        qib_write_kreg_port(ppd, krp_senddmalengen,
                            ppd->sdma_descq_cnt |
                            (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
{
        /* Commit writes to memory and advance the tail on the chip */
        wmb();
        ppd->sdma_descq_tail = tail;
        qib_write_kreg_port(ppd, krp_senddmatail, tail);
}
/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
{
        /*
         * Drain all FIFOs.
         * The hardware doesn't require this but we do it so that verbs
         * and user applications don't wait for link active to send stale
         * data.
         */
        sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);

        qib_sdma_7322_setlengen(ppd);
        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
        ppd->sdma_head_dma[0] = 0;
        qib_7322_sdma_sendctrl(ppd,
                ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
}
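/*
 * Per-port SDMA error bits that stop the SDMA engine (everything in
 * QIB_E_P_SDMAERRS except SDmaUnexpData).
 */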
#define DISABLES_SDMA ( \
        QIB_E_P_SDMAHALT | \
        QIB_E_P_SDMADESCADDRMISALIGN | \
        QIB_E_P_SDMAMISSINGDW | \
        QIB_E_P_SDMADWEN | \
        QIB_E_P_SDMARPYTAG | \
        QIB_E_P_SDMA1STDESC | \
        QIB_E_P_SDMABASE | \
        QIB_E_P_SDMATAILOUTOFBOUND | \
        QIB_E_P_SDMAOUTOFBOUND | \
        QIB_E_P_SDMAGENMISMATCH)

static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
{
        unsigned long flags;
        struct qib_devdata *dd = ppd->dd;

        errs &= QIB_E_P_SDMAERRS;

        if (errs & QIB_E_P_SDMAUNEXPDATA)
                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
                            ppd->port);

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        switch (ppd->sdma_state.current_state) {
        case qib_sdma_state_s00_hw_down:
                break;

        case qib_sdma_state_s10_hw_start_up_wait:
                if (errs & QIB_E_P_SDMAHALT)
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e20_hw_started);
                break;

        case qib_sdma_state_s20_idle:
                break;

        case qib_sdma_state_s30_sw_clean_up_wait:
                break;

        case qib_sdma_state_s40_hw_clean_up_wait:
                if (errs & QIB_E_P_SDMAHALT)
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e50_hw_cleaned);
                break;

        case qib_sdma_state_s50_hw_halt_wait:
                if (errs & QIB_E_P_SDMAHALT)
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e60_hw_halted);
                break;

        case qib_sdma_state_s99_running:
                __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
                __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
                break;
        }

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
/*
 * handle per-device errors (not per-port errors)
 */
static noinline void handle_7322_errors(struct qib_devdata *dd)
{
        char *msg;
        u64 iserr = 0;
        u64 errs;
        u64 mask;
        int log_idx;

        qib_stats.sps_errints++;
        errs = qib_read_kreg64(dd, kr_errstatus);
        if (!errs) {
                qib_devinfo(dd->pcidev,
                            "device error interrupt, but no error bits set!\n");
                goto done;
        }

        /* don't report errors that are masked */
        errs &= dd->cspec->errormask;
        msg = dd->cspec->emsgbuf;

        /* do these first, they are most important */
        if (errs & QIB_E_HARDWARE) {
                *msg = '\0';
                qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        } else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
                                qib_inc_eeprom_err(dd, log_idx, 1);

        if (errs & QIB_E_SPKTERRS) {
                qib_disarm_7322_senderrbufs(dd->pport);
                qib_stats.sps_txerrs++;
        } else if (errs & QIB_E_INVALIDADDR)
                qib_stats.sps_txerrs++;
        else if (errs & QIB_E_ARMLAUNCH) {
                qib_stats.sps_txerrs++;
                qib_disarm_7322_senderrbufs(dd->pport);
        }
        qib_write_kreg(dd, kr_errclear, errs);

        /*
         * The ones we mask off are handled specially below
         * or above.  Also mask SDMADISABLED by default as it
         * is too chatty.
         */
        mask = QIB_E_HARDWARE;
        *msg = '\0';

        err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
                   qib_7322error_msgs);

        /*
         * Getting reset is a tragedy for all ports.  Mark the device
         * _and_ the ports as "offline" in a way meaningful to each.
         */
        if (errs & QIB_E_RESET) {
                int pidx;

                qib_dev_err(dd,
                        "Got reset, requires re-init (unload and reload driver)\n");
                dd->flags &= ~QIB_INITTED;  /* needs re-init */
                /* mark as having had error */
                *dd->devstatusp |= QIB_STATUS_HWERROR;
                for (pidx = 0; pidx < dd->num_pports; ++pidx)
                        if (dd->pport[pidx].link_speed_supported)
                                *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
        }

        if (*msg && iserr)
                qib_dev_err(dd, "%s error\n", msg);

        /*
         * If there were hdrq or egrfull errors, wake up any processes
         * waiting in poll.  We used to try to check which contexts had
         * the overflow, but given the cost of that and the chip reads
         * to support it, it's better to just wake everybody up if we
         * get an overflow; waiters can poll again if it's not them.
         */
        if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
                qib_handle_urcv(dd, ~0U);
                if (errs & ERR_MASK(RcvEgrFullErr))
                        qib_stats.sps_buffull++;
                else
                        qib_stats.sps_hdrfull++;
        }

done:
        return;
}
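/* Tasklet wrapper: handle device errors, then re-enable the error interrupt mask. */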
static void qib_error_tasklet(unsigned long data)
{
        struct qib_devdata *dd = (struct qib_devdata *)data;

        handle_7322_errors(dd);
        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
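/* Timer callback: the chase-disable period has expired; put the link back in POLL. */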
static void reenable_chase(unsigned long opaque)
{
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
{
        ppd->cpspec->chase_end = 0;

        if (!qib_chase)
                return;

        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
        ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
        add_timer(&ppd->cpspec->chase_timer);
}
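/*
 * Examine the link training state on an IBC status change and work around
 * serdes issues: break out of a stuck training "chase", re-force H1 and
 * adjust tx serdes settings for QDR, and re-enable LOS and QDR DFE
 * adaptation when the link goes back down.
 */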
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
        u8 ibclt;
        u64 tnow;

        ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);

        /*
         * Detect and handle the state chase issue, where we can
         * get stuck if we are unlucky on timing on both sides of
         * the link.  If we are, we disable, set a timer, and
         * then re-enable.
         */
        switch (ibclt) {
        case IB_7322_LT_STATE_CFGRCVFCFG:
        case IB_7322_LT_STATE_CFGWAITRMT:
        case IB_7322_LT_STATE_TXREVLANES:
        case IB_7322_LT_STATE_CFGENH:
                tnow = get_jiffies_64();
                if (ppd->cpspec->chase_end &&
                    time_after64(tnow, ppd->cpspec->chase_end))
                        disable_chase(ppd, tnow, ibclt);
                else if (!ppd->cpspec->chase_end)
                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
                break;
        default:
                ppd->cpspec->chase_end = 0;
                break;
        }

        if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
              ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
             ibclt == IB_7322_LT_STATE_LINKUP) &&
            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
                force_h1(ppd);
                ppd->cpspec->qdr_reforce = 1;
                if (!ppd->dd->cspec->r1)
                        serdes_7322_los_enable(ppd, 0);
        } else if (ppd->cpspec->qdr_reforce &&
                   (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
                   (ibclt == IB_7322_LT_STATE_CFGENH ||
                    ibclt == IB_7322_LT_STATE_CFGIDLE ||
                    ibclt == IB_7322_LT_STATE_LINKUP))
                force_h1(ppd);

        if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
            ppd->link_speed_enabled == QIB_IB_QDR &&
            (ibclt == IB_7322_LT_STATE_CFGTEST ||
             ibclt == IB_7322_LT_STATE_CFGENH ||
             (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
              ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
                adj_tx_serdes(ppd);

        if (ibclt != IB_7322_LT_STATE_LINKUP) {
                u8 ltstate = qib_7322_phys_portstate(ibcst);
                u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
                                          LinkTrainingState);
                if (!ppd->dd->cspec->r1 &&
                    pibclt == IB_7322_LT_STATE_LINKUP &&
                    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
                    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
                    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
                    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
                        /* If the link went down (but not into recovery),
                         * turn LOS back on */
                        serdes_7322_los_enable(ppd, 1);
                if (!ppd->cpspec->qdr_dfe_on &&
                    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
                        ppd->cpspec->qdr_dfe_on = 1;
                        ppd->cpspec->qdr_dfe_time = 0;
                        /* On link down, reenable QDR adaptation */
                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
                                            ppd->dd->cspec->r1 ?
                                            QDR_STATIC_ADAPT_DOWN_R1 :
                                            QDR_STATIC_ADAPT_DOWN);
                        printk(KERN_INFO QIB_DRV_NAME
                                " IB%u:%u re-enabled QDR adaptation ibclt %x\n",
                                ppd->dd->unit, ppd->port, ibclt);
                }
        }
}
static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);

/*
 * This is per-pport error handling.  It will likely get its own MSIx
 * interrupt (one for each port, although just a single handler).
 */
static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
{
        char *msg;
        u64 ignore_this_time = 0, iserr = 0, errs, fmask;
        struct qib_devdata *dd = ppd->dd;

        /* do this as soon as possible */
        fmask = qib_read_kreg64(dd, kr_act_fmask);
        if (!fmask)
                check_7322_rxe_status(ppd);

        errs = qib_read_kreg_port(ppd, krp_errstatus);
        if (!errs)
                qib_devinfo(dd->pcidev,
                            "Port%d error interrupt, but no error bits set!\n",
                            ppd->port);
        if (!fmask)
                errs &= ~QIB_E_P_IBSTATUSCHANGED;
        if (!errs)
                goto done;

        msg = ppd->cpspec->epmsgbuf;
        *msg = '\0';

        if (errs & ~QIB_E_P_BITSEXTANT) {
                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
                if (!*msg)
                        snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
                                 "no others");
                qib_dev_porterr(dd, ppd->port,
                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
                        (errs & ~QIB_E_P_BITSEXTANT), msg);
                *msg = '\0';
        }

        if (errs & QIB_E_P_SHDR) {
                u64 symptom;

                /* determine cause, then write to clear */
                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
                           hdrchk_msgs);
                *msg = '\0';
                /* senderrbuf cleared in SPKTERRS below */
        }

        if (errs & QIB_E_P_SPKTERRS) {
                if ((errs & QIB_E_P_LINK_PKTERRS) &&
                    !(ppd->lflags & QIBL_LINKACTIVE)) {
                        /*
                         * This can happen when trying to bring the link
                         * up, but the IB link changes state at the "wrong"
                         * time.  The IB logic then complains that the packet
                         * isn't valid.  We don't want to confuse people, so
                         * we just don't print them, except at debug
                         */
                        err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                                   (errs & QIB_E_P_LINK_PKTERRS),
                                   qib_7322p_error_msgs);
                        *msg = '\0';
                        ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
                }
                qib_disarm_7322_senderrbufs(ppd);
        } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
                   !(ppd->lflags & QIBL_LINKACTIVE)) {
                /*
                 * This can happen when SMA is trying to bring the link
                 * up, but the IB link changes state at the "wrong" time.
                 * The IB logic then complains that the packet isn't
                 * valid.  We don't want to confuse people, so we just
                 * don't print them, except at debug
                 */
                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
                           qib_7322p_error_msgs);
                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
                *msg = '\0';
        }

        qib_write_kreg_port(ppd, krp_errclear, errs);

        errs &= ~ignore_this_time;
        if (!errs)
                goto done;

        if (errs & QIB_E_P_RPKTERRS)
                qib_stats.sps_rcverrs++;
        if (errs & QIB_E_P_SPKTERRS)
                qib_stats.sps_txerrs++;

        iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);

        if (errs & QIB_E_P_SDMAERRS)
                sdma_7322_p_errors(ppd, errs);

        if (errs & QIB_E_P_IBSTATUSCHANGED) {
                u64 ibcs;
                u8 ltstate;

                ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
                ltstate = qib_7322_phys_portstate(ibcs);

                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
                        handle_serdes_issues(ppd, ibcs);
                if (!(ppd->cpspec->ibcctrl_a &
                      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
                        /*
                         * We got our interrupt, so init code should be
                         * happy and not try alternatives.  Now squelch
                         * other "chatter" from link-negotiation (pre Init)
                         */
                        ppd->cpspec->ibcctrl_a |=
                                SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
                                            ppd->cpspec->ibcctrl_a);
                }

                /* Update our picture of width and speed from chip */
                ppd->link_width_active =
                        (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
                        IB_WIDTH_4X : IB_WIDTH_1X;
                ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
                        LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
                        SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
                        QIB_IB_DDR : QIB_IB_SDR;

                if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
                    IB_PHYSPORTSTATE_DISABLED)
                        qib_set_ib_7322_lstate(ppd, 0,
                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
                else
                        /*
                         * Since going into a recovery state causes the link
                         * state to go down and since recovery is transitory,
                         * it is better if we "miss" ever seeing the link
                         * training state go into recovery (i.e., ignore this
                         * transition for link state special handling purposes)
                         * without updating lastibcstat.
                         */
                        if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
                            ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
                            ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
                            ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
                                qib_handle_e_ibstatuschanged(ppd, ibcs);
        }

        if (*msg && iserr)
                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

        if (ppd->state_wanted & ppd->lflags)
                wake_up_interruptible(&ppd->state_wait);

done:
        return;
}
/* enable/disable chip from delivering interrupts */
static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
{
        if (enable) {
                if (dd->flags & QIB_BADINTR)
                        return;
                qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
                /* cause any pending enabled interrupts to be re-delivered */
                qib_write_kreg(dd, kr_intclear, 0ULL);
                if (dd->cspec->num_msix_entries) {
                        /* and same for MSIx */
                        u64 val = qib_read_kreg64(dd, kr_intgranted);

                        if (val)
                                qib_write_kreg(dd, kr_intgranted, val);
                }
        } else
                qib_write_kreg(dd, kr_intmask, 0ULL);
}
/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips.
 */
static void qib_7322_clear_freeze(struct qib_devdata *dd)
{
        int pidx;

        /* disable error interrupts, to avoid confusion */
        qib_write_kreg(dd, kr_errmask, 0ULL);

        for (pidx = 0; pidx < dd->num_pports; ++pidx)
                if (dd->pport[pidx].link_speed_supported)
                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
                                            0ULL);

        /* also disable interrupts; errormask is sometimes overwritten */
        qib_7322_set_intr_state(dd, 0);

        /* clear the freeze, and be sure chip saw it */
        qib_write_kreg(dd, kr_control, dd->control);
        qib_read_kreg32(dd, kr_scratch);

        /*
         * Force new interrupt if any hwerr, error or interrupt bits are
         * still set, and clear "safe" send packet errors related to freeze
         * and cancelling sends.  Re-enable error interrupts before possible
         * force of re-interrupt on pending interrupts.
         */
        qib_write_kreg(dd, kr_hwerrclear, 0ULL);
        qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
        /* We need to purge per-port errs and reset mask, too */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                if (!dd->pport[pidx].link_speed_supported)
                        continue;
                qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
                qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
        }
        qib_7322_set_intr_state(dd, 1);
}
/* no error handling to speak of */
/**
 * qib_7322_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Use the same msg buffer as regular errors (qib_handle_errors()) to
 * avoid excessive stack use.  Most hardware errors are catastrophic,
 * but for right now, we'll print them and continue.
 */
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
                                     size_t msgl)
{
        u64 hwerrs;
        u32 ctrl;
        int isfatal = 0;

        hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
        if (!hwerrs)
                goto bail;
        if (hwerrs == ~0ULL) {
                qib_dev_err(dd,
                        "Read of hardware error status failed (all bits set); ignoring\n");
                goto bail;
        }
        qib_stats.sps_hwerrs++;

        /* Always clear the error status register, except BIST fail */
        qib_write_kreg(dd, kr_hwerrclear, hwerrs &
                       ~HWE_MASK(PowerOnBISTFailed));

        hwerrs &= dd->cspec->hwerrmask;

        /* no EEPROM logging, yet */

        if (hwerrs)
                qib_devinfo(dd->pcidev,
                        "Hardware error: hwerr=0x%llx (cleared)\n",
                        (unsigned long long) hwerrs);

        ctrl = qib_read_kreg32(dd, kr_control);
        if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
                /*
                 * No recovery yet...
                 */
                if ((hwerrs & ~HWE_MASK(LATriggered)) ||
                    dd->cspec->stay_in_freeze) {
                        /*
                         * If any set that we aren't ignoring, only make the
                         * complaint once, in case it's stuck or recurring,
                         * and we get here multiple times.
                         * Force link down, so switch knows, and
                         * LEDs are turned off.
                         */
                        if (dd->flags & QIB_INITTED)
                                isfatal = 1;
                } else
                        qib_7322_clear_freeze(dd);
        }

        if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
                isfatal = 1;
                strlcpy(msg,
                        "[Memory BIST test failed, InfiniPath hardware unusable]",
                        msgl);
                /* ignore from now on, so disable until driver reloaded */
                dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
        }

        err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);

        /* Ignore esoteric PLL failures et al. */

        qib_dev_err(dd, "%s hardware error\n", msg);

        if (isfatal && !dd->diag_client) {
                qib_dev_err(dd,
                        "Fatal Hardware Error, no longer usable, SN %.16s\n",
                        dd->serial);
                /*
                 * for /sys status file and user programs to print; if no
                 * trailing brace is copied, we'll know it was truncated.
                 */
                if (dd->freezemsg)
                        snprintf(dd->freezemsg, dd->freezelen,
                                 "{%s}", msg);
                qib_disable_after_error(dd);
        }
bail:;
}
/**
 * qib_7322_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask
 */
static void qib_7322_init_hwerrors(struct qib_devdata *dd)
{
        int pidx;
        u64 extsval;

        extsval = qib_read_kreg64(dd, kr_extstatus);
        if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
                         QIB_EXTS_MEMBIST_ENDTEST)))
                qib_dev_err(dd, "MemBIST did not complete!\n");

        /* never clear BIST failure, so reported on each driver load */
        qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

        /* clear all */
        qib_write_kreg(dd, kr_errclear, ~0ULL);
        /* enable errors that are masked, at least this first time. */
        qib_write_kreg(dd, kr_errmask, ~0ULL);
        dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
        for (pidx = 0; pidx < dd->num_pports; ++pidx)
                if (dd->pport[pidx].link_speed_supported)
                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
                                            ~0ULL);
}
/*
 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based. There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
	} else
		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC)
		 */
		qib_7322_mini_pcs_reset(ppd);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
	}

	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
			    mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);
}
/*
 * The total RCV buffer memory is 64KB, used for both ports, and is
 * in units of 64 bytes (same as IB flow control credit unit).
 * The consumedVL unit in the same registers are in 32 byte units!
 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
 * in krp_rxcreditvl15, rather than 10.
 */
#define RCV_BUF_UNITSZ 64
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
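/*
 * Worked example (dual-port board): NUM_RCV_BUF_UNITS = 65536 / (64 * 2)
 * = 512 credits per port. VL15 reserves (2 * 288 + 63) / 64 = 9 of those,
 * leaving 503 for the data VLs; if four VLs are operational, each gets
 * 125 and the remainder of 3 goes to VL0.
 */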
static void set_vls(struct qib_pportdata *ppd)
{
	int i, numvls, totcred, cred_vl, vl0extra;
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	numvls = qib_num_vls(ppd->vls_operational);

	/*
	 * Set up per-VL credits. Below is kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 9 credits, for two max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
	 */
	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
	totcred = NUM_RCV_BUF_UNITS(dd);
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	totcred -= cred_vl;
	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
	cred_vl = totcred / numvls;
	vl0extra = totcred - cred_vl * numvls;
	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
	for (i = 1; i < numvls; i++)
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
	for (; i < 8; i++) /* no buffer space for other VLs */
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);

	/* Notify IBC that credits need to be recalculated */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
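	/*
	 * CREDIT_CHANGE is pulsed: set, flushed via the scratch write,
	 * then cleared, presumably so a later credit update can present
	 * a fresh edge for the IBC to detect.
	 */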
	for (i = 0; i < numvls; i++)
		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);

	/* Change the number of operational VLs */
	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
}
/*
 * The code that deals with actual SerDes is in serdes_7322_init().
 * Compared to the code for iba7220, it is minimal.
 */
static int serdes_7322_init(struct qib_pportdata *ppd);

/**
 * qib_7322_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 */
static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, guid, ibc;
	unsigned long flags;
	int ret = 0;

	/*
	 * SerDes model not in Pd, but still need to
	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
	 * eventually.
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	if (qib_compat_ddr_negotiate) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
						crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
						crp_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur. Units are 128ns for this chip.
	 * Set to 3usec.
	 */
	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
		SYM_LSB(IBCCtrlA_0, MaxPktLen);
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */

	/* initially come up waiting for TS1, without sending anything. */
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	ppd->cpspec->ibcctrl_a = val;

	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is still
	 * in reset from above). Writes new value of ibcctrl_a as last step.
	 */
	qib_7322_mini_pcs_reset(ppd);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
							krp_ibcctrl_b);
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_SPEED_DDR |
				IBA7322_IBC_SPEED_SDR |
				IBA7322_IBC_WIDTH_AUTONEG |
				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
				(lse << IBA7322_IBC_SPEED_LSB) |
				IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
		else
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
				IBA7322_IBC_SPEED_QDR |
					IBA7322_IBC_IBTA_1_2_MASK :
				(lse == QIB_IB_DDR) ?
					IBA7322_IBC_SPEED_DDR :
					IBA7322_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7322_IBC_WIDTH_4X_ONLY :
				IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
			IBA7322_IBC_HRTBT_MASK);
	}
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);

	serdes_7322_init(ppd);

	guid = be64_to_cpu(ppd->guid);
	if (!guid) {
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	}

	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

	/* Enable port */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	set_vls(ppd);

	/* be paranoid against later code motion, etc. */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Hold the link state machine for mezz boards */
	if (IS_QMH(dd) || IS_QME(dd))
		qib_set_ib_7322_lstate(ppd, 0,
				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	/* Also enable IBSTATUSCHG interrupt. */
	val = qib_read_kreg_port(ppd, krp_errmask);
	qib_write_kreg_port(ppd, krp_errmask,
		val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
	return ret;
}
/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	unsigned long flags;

	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);

	/*
	 * Despite the name, actually disables IBC as well. Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
	 * along with the PCS being reset.
	 */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_7322_mini_pcs_reset(ppd);

	/*
	 * Update the adjusted counters so the adjustment persists
	 * across driver reload.
	 */
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
		}
		if (ppd->cpspec->iblnkdowndelta) {
			val = read_7322_creg32_port(ppd, crp_iblinkdown);
			val += ppd->cpspec->iblnkdowndelta;
			write_7322_creg_port(ppd, crp_iblinkdown, val);
		}
		/*
		 * No need to save ibmalfdelta since IB perfcounters
		 * are cleared on driver reload.
		 */

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
}
/**
 * qib_setup_7322_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs if on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
 *
 * Note: We try to match the Mellanox HCA LED behavior as best
 * we can. Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate. That's
 * visible overhead, so not something we will do.
 */
static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
{
	struct qib_devdata *dd = ppd->dd;
	u64 extctl, ledblink = 0, val;
	unsigned long flags;
	int yel, grn;

	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
	if (dd->diag_client)
		return;

	/* Allow override of LED display for, e.g., locating system in rack */
	if (ppd->led_override) {
		grn = (ppd->led_override & QIB_LED_PHYS);
		yel = (ppd->led_override & QIB_LED_LOG);
	} else if (on) {
		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		grn = qib_7322_phys_portstate(val) ==
			IB_PHYSPORTSTATE_LINKUP;
		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
	} else {
		grn = 0;
		yel = 0;
	}

	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
	if (grn) {
		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
		/*
		 * Counts are in chip clock (4ns) periods.
		 * This is 66.6 ms (66,600,000 ns / 4 ns per count) on,
		 * 187.5 ms (3/16 sec) off, with packets rcvd.
		 */
		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
	}
	if (yel)
		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);

	if (ledblink) /* blink the LED on packet receive */
		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
}
/*
 * Disable MSIx interrupt if enabled, call generic MSIx code
 * to cleanup, and clear pending MSIx interrupts.
 * Used for fallback to INTx, after reset, and when MSIx setup fails.
 */
static void qib_7322_nomsix(struct qib_devdata *dd)
{
	u64 intgranted;
	int n;

	dd->cspec->main_int_mask = ~0ULL;
	n = dd->cspec->num_msix_entries;
	if (n) {
		int i;

		dd->cspec->num_msix_entries = 0;
		for (i = 0; i < n; i++)
			free_irq(dd->cspec->msix_entries[i].vector,
				 dd->cspec->msix_arg[i]);
		qib_nomsix(dd);
	}
	/* make sure no MSIx interrupts are left pending */
	intgranted = qib_read_kreg64(dd, kr_intgranted);
	if (intgranted)
		qib_write_kreg(dd, kr_intgranted, intgranted);
}
static void qib_7322_free_irq(struct qib_devdata *dd)
{
	if (dd->cspec->irq) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	qib_7322_nomsix(dd);
}

static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
	int i;

	qib_7322_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->sendchkenable);
	kfree(dd->cspec->sendgrhchk);
	kfree(dd->cspec->sendibchk);
	kfree(dd->cspec->msix_entries);
	kfree(dd->cspec->msix_arg);
	for (i = 0; i < dd->num_pports; i++) {
		unsigned long flags;
		u32 mask = QSFP_GPIO_MOD_PRS_N |
			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);

		kfree(dd->pport[i].cpspec->portcntrs);
		if (dd->flags & QIB_HAS_QSFP) {
			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
			dd->cspec->gpio_mask &= ~mask;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
		}
		if (dd->pport[i].ibport_data.smi_ah)
			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
	}
}
/* handle SDMA interrupts */
static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
{
	struct qib_pportdata *ppd0 = &dd->pport[0];
	struct qib_pportdata *ppd1 = &dd->pport[1];
	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));

	if (intr0)
		qib_sdma_intr(ppd0);
	if (intr1)
		qib_sdma_intr(ppd1);

	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
}

/*
 * Set or clear the Send buffer available interrupt enable bit.
 */
static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (needint)
		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
	else
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}
/*
 * Somehow got an interrupt with reserved bits set in interrupt status.
 * Print a message so we know it happened, then clear them.
 * keep mainline interrupt handler cache-friendly
 */
static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
{
	u64 kills;

	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
		    (unsigned long long) kills);
	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
}
/* keep mainline interrupt handler cache-friendly */
static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
{
	u32 gpiostatus;
	int handled = 0;
	int pidx;

	/*
	 * Boards for this chip currently don't use GPIO interrupts,
	 * so clear by writing GPIOstatus to GPIOclear, and complain
	 * to developer. To avoid endless repeats, clear
	 * the bits in the mask, since there is some kind of
	 * programming error or chip problem.
	 */
	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
	/*
	 * In theory, writing GPIOstatus to GPIOclear could
	 * have a bad side-effect on some diagnostic that wanted
	 * to poll for a status-change, but the various shadows
	 * make that problematic at best. Diags will just suppress
	 * all GPIO interrupts during such tests.
	 */
	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
	/*
	 * Check for QSFP MOD_PRS changes
	 * only works for single port if IB1 != pidx1
	 */
	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
	     ++pidx) {
		struct qib_pportdata *ppd;
		struct qib_qsfp_data *qd;
		u32 mask;

		if (!dd->pport[pidx].link_speed_supported)
			continue;
		mask = QSFP_GPIO_MOD_PRS_N;
		ppd = dd->pport + pidx;
		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
		if (gpiostatus & dd->cspec->gpio_mask & mask) {
			u64 pins;

			qd = &ppd->cpspec->qsfp_data;
			gpiostatus &= ~mask;
			pins = qib_read_kreg64(dd, kr_extstatus);
			pins >>= SYM_LSB(EXTStatus, GPIOIn);
			if (!(pins & mask)) {
				++handled;
				qd->t_insert = get_jiffies_64();
				queue_work(ib_wq, &qd->work);
			}
		}
	}

	if (gpiostatus && !handled) {
		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
		u32 gpio_irq = mask & gpiostatus;

		/*
		 * Clear any troublemakers, and update chip from shadow
		 */
		dd->cspec->gpio_mask &= ~gpio_irq;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	}
}
/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
{
	if (istat & ~QIB_I_BITSEXTANT)
		unknown_7322_ibits(dd, istat);
	if (istat & QIB_I_GPIO)
		unknown_7322_gpio_intr(dd);
	if (istat & QIB_I_C_ERROR) {
		qib_write_kreg(dd, kr_errmask, 0ULL);
		tasklet_schedule(&dd->error_tasklet);
	}
	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
		handle_7322_p_errors(dd->rcd[0]->ppd);
	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
		handle_7322_p_errors(dd->rcd[1]->ppd);
}
/*
 * Dynamically adjust the rcv int timeout for a context based on incoming
 * packet rate.
 */
static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
{
	struct qib_devdata *dd = rcd->dd;
	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];

	/*
	 * Dynamically adjust idle timeout on chip
	 * based on number of packets processed.
	 */
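	/*
	 * A busy context (>= rcv_int_count packets per interrupt) doubles
	 * its timeout, capped at rcv_int_timeout; a quiet one halves it,
	 * down to a floor of 2, so interrupt latency tracks traffic.
	 */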
	if (npkts < rcv_int_count && timeout > 2)
		timeout >>= 1;
	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
		timeout = min(timeout << 1, rcv_int_timeout);
	else
		return;

	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
}
/*
 * This is the main interrupt handler.
 * It will normally only be used for low frequency interrupts but may
 * have to handle all interrupts if INTx is enabled or fewer than normal
 * MSIx interrupts were allocated.
 * This routine should ignore the interrupt bits for any of the
 * dedicated MSIx handlers.
 */
static irqreturn_t qib_7322intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u64 istat;
	u64 ctxtrbits;
	u64 rmask;
	unsigned i;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg64(dd, kr_intstatus);

	if (unlikely(istat == ~0ULL)) {
		qib_bad_intrstatus(dd);
		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	istat &= dd->cspec->main_int_mask;
	if (unlikely(!istat)) {
		/* already handled, or shared and not us */
		ret = IRQ_NONE;
		goto bail;
	}

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* handle "errors" of various kinds first, device ahead of port */
	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
			      INT_MASK_P(Err, 1))))
		unlikely_7322_intr(dd, istat);

	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary. The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
	if (ctxtrbits) {
		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
			(1ULL << QIB_I_RCVURG_LSB);
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				if (dd->rcd[i])
					qib_kreceive(dd->rcd[i], NULL, &npkts);
			}
			rmask <<= 1;
		}
		if (ctxtrbits) {
			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
				(ctxtrbits >> QIB_I_RCVURG_LSB);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
		sdma_7322_intr(dd, istat);

	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}
/*
 * Dedicated receive packet available interrupt handler.
 */
static irqreturn_t qib_7322pintr(int irq, void *data)
{
	struct qib_ctxtdata *rcd = data;
	struct qib_devdata *dd = rcd->dd;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);

	qib_kreceive(rcd, NULL, &npkts);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send buffer available interrupt handler.
 */
static irqreturn_t qib_7322bufavail(int irq, void *data)
{
	struct qib_devdata *dd = data;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);

	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
	if (dd->flags & QIB_INITTED)
		qib_ib_piobufavail(dd);
	else
		qib_wantpiobuf_7322_intr(dd, 0);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA interrupt handler.
 */
static irqreturn_t sdma_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA idle interrupt handler.
 */
static irqreturn_t sdma_idle_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA progress interrupt handler.
 */
static irqreturn_t sdma_progress_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaProgress, 1) :
		       INT_MASK_P(SDmaProgress, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA cleanup interrupt handler.
 */
static irqreturn_t sdma_cleanup_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_PM(SDmaCleanupDone, 1) :
		       INT_MASK_PM(SDmaCleanupDone, 0));
	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);

	return IRQ_HANDLED;
}
/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 * If we are using MSIx interrupts, we may fall back to
 * INTx later, if the interrupt handler doesn't get called
 * within 1/2 second (see verify_interrupt()).
 */
static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
{
	int ret, i, msixnum;
	u64 redirect[6];
	u64 mask;

	if (!dd->num_pports)
		return;

	if (clearpend) {
		/*
		 * if not switching interrupt types, be sure interrupts are
		 * disabled, and then clear anything pending at this point,
		 * because we are starting clean.
		 */
		qib_7322_set_intr_state(dd, 0);

		/* clear the reset error, init error/hwerror mask */
		qib_7322_init_hwerrors(dd);

		/* clear any interrupt bits that might be set */
		qib_write_kreg(dd, kr_intclear, ~0ULL);

		/* make sure no pending MSIx intr, and clear diag reg */
		qib_write_kreg(dd, kr_intgranted, ~0ULL);
		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
	}

	if (!dd->cspec->num_msix_entries) {
		/* Try to get INTx interrupt */
try_intx:
		if (!dd->pcidev->irq) {
			qib_dev_err(dd, "irq is 0, BIOS error? "
				    "Interrupts won't work\n");
			goto bail;
		}
		ret = request_irq(dd->pcidev->irq, qib_7322intr,
				  IRQF_SHARED, QIB_DRV_NAME, dd);
		if (ret) {
			qib_dev_err(dd, "Couldn't setup INTx "
				    "interrupt (irq=%d): %d\n",
				    dd->pcidev->irq, ret);
			goto bail;
		}
		dd->cspec->irq = dd->pcidev->irq;
		dd->cspec->main_int_mask = ~0ULL;
		goto bail;
	}

	/* Try to get MSIx interrupts */
	memset(redirect, 0, sizeof redirect);
	mask = ~0ULL;
	msixnum = 0;
	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
		irq_handler_t handler;
		const char *name;
		void *arg;
		u64 val;
		int lsb, reg, sh;

		if (i < ARRAY_SIZE(irq_table)) {
			if (irq_table[i].port) {
				/* skip if for a non-configured port */
				if (irq_table[i].port > dd->num_pports)
					continue;
				arg = dd->pport + irq_table[i].port - 1;
			} else
				arg = dd;
			lsb = irq_table[i].lsb;
			handler = irq_table[i].handler;
			name = irq_table[i].name;
		} else {
			unsigned ctxt;

			ctxt = i - ARRAY_SIZE(irq_table);
			/* per krcvq context receive interrupt */
			arg = dd->rcd[ctxt];
			if (!arg)
				continue;
			if (qib_krcvq01_no_msi && ctxt < 2)
				continue;
			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
			handler = qib_7322pintr;
			name = QIB_DRV_NAME " (kctx)";
		}
		ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
				  handler, 0, name, arg);
		if (ret) {
			/*
			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to setup here.
			 */
			qib_dev_err(dd, "Couldn't setup MSIx "
				    "interrupt (vec=%d, irq=%d): %d\n", msixnum,
				    dd->cspec->msix_entries[msixnum].vector,
				    ret);
			qib_7322_nomsix(dd);
			goto try_intx;
		}
		dd->cspec->msix_arg[msixnum] = arg;
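		/*
		 * Route this interrupt source to the vector just registered:
		 * each IntRedirect register packs IBA7322_REDIRECT_VEC_PER_REG
		 * vector numbers, so the source's lsb selects the register
		 * and the field position within it.
		 */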
		if (lsb >= 0) {
			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
				SYM_LSB(IntRedirect0, vec1);
			mask &= ~(1ULL << lsb);
			redirect[reg] |= ((u64) msixnum) << sh;
		}
		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
		msixnum++;
	}
	/* Initialize the vector mapping */
	for (i = 0; i < ARRAY_SIZE(redirect); i++)
		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
	dd->cspec->main_int_mask = mask;
	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
		(unsigned long)dd);
bail:;
}
/**
 * qib_7322_boardname - fill in the board name and note features
 * @dd: the qlogic_ib device
 *
 * info will be based on the board revision register
 */
static unsigned qib_7322_boardname(struct qib_devdata *dd)
{
	/* Will need enumeration of board-types here */
	char *n;
	u32 boardid, namelen;
	unsigned features = DUAL_PORT_CAP;

	boardid = SYM_FIELD(dd->revision, Revision, BoardID);

	switch (boardid) {
	case 0:
		n = "InfiniPath_QLE7342_Emulation";
		break;
	case 1:
		n = "InfiniPath_QLE7340";
		dd->flags |= QIB_HAS_QSFP;
		features = PORT_SPD_CAP;
		break;
	case 2:
		n = "InfiniPath_QLE7342";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 3:
		n = "InfiniPath_QMI7342";
		break;
	case 4:
		n = "InfiniPath_Unsupported7342";
		qib_dev_err(dd, "Unsupported version of QMH7342\n");
		features = 0;
		break;
	case BOARD_QMH7342:
		n = "InfiniPath_QMH7342";
		features = 0x24;
		break;
	case BOARD_QME7342:
		n = "InfiniPath_QME7342";
		break;
	case 8:
		n = "InfiniPath_QME7362";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 15:
		n = "InfiniPath_QLE7342_TEST";
		dd->flags |= QIB_HAS_QSFP;
		break;
	default:
		n = "InfiniPath_QLE73xy_UNKNOWN";
		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
		break;
	}
	dd->board_atten = 1; /* index into txdds_Xdr */

	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));

	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
		qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
			    " by module parameter\n", dd->unit);
		features &= PORT_SPD_CAP;
	}

	return features;
}
/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context.
 */
static int qib_do_7322_reset(struct qib_devdata *dd)
{
	u64 val;
	u64 *msix_vecsave;
	int i, msix_entries, ret = 1;
	u16 cmdval;
	u8 int_line, clinesz;
	unsigned long flags;

	/* Use dev_err so it shows up in logs, etc. */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);

	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);

	msix_entries = dd->cspec->num_msix_entries;

	/* no interrupts till re-initted */
	qib_7322_set_intr_state(dd, 0);

	if (msix_entries) {
		qib_7322_nomsix(dd);
		/*
		 * can be up to 512 bytes, too big for stack; use the count
		 * saved above, since qib_7322_nomsix() just zeroed the
		 * live num_msix_entries
		 */
		msix_vecsave = kmalloc(2 * msix_entries *
			sizeof(u64), GFP_KERNEL);
		if (!msix_vecsave)
			qib_dev_err(dd, "No mem to save MSIx data\n");
	} else
		msix_vecsave = NULL;

	/*
	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
	 * info that is set up by the BIOS, so we have to save and restore
	 * it ourselves. There is some risk something could change it,
	 * after we save it, but since we have disabled the MSIx, it
	 * shouldn't be touched...
	 */
	for (i = 0; i < msix_entries; i++) {
		u64 vecaddr, vecdata;

		vecaddr = qib_read_kreg64(dd, 2 * i +
			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
		if (msix_vecsave) {
			msix_vecsave[2 * i] = vecaddr;
			/* save it without the masked bit set */
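			/*
			 * (read as u64 pairs, bit 32 of the data half is
			 * bit 0 of the MSI-X Vector Control dword, i.e.
			 * the per-vector Mask bit)
			 */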
			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
		}
	}

	dd->pport->cpspec->ibdeltainprog = 0;
	dd->pport->cpspec->ibsymdelta = 0;
	dd->pport->cpspec->iblnkerrdelta = 0;
	dd->pport->cpspec->ibmalfdelta = 0;
	dd->int_counter = 0; /* so we check interrupts work again */

	/*
	 * Keep chip from being accessed until we are ready. Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
	 */
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
	dd->flags |= QIB_DOING_RESET;
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);

	for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST, etc. to complete; longer on each retry.
		 * We sometimes get machine checks from bus timeout if no
		 * response, so for now, make it *really* long.
		 */
		msleep(1000 + (1 + i) * 3000);
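		/* i.e. 7, 10, 13, 16 and 19 seconds for retries 1-5 */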
		qib_pcie_reenable(dd, cmdval, int_line, clinesz);

		/*
		 * Use readq directly, so we don't need to mark it as PRESENT
		 * until we get a successful indication that all is well.
		 */
		val = readq(&dd->kregbase[kr_revision]);
		if (val == dd->revision)
			break;
		if (i == 5) {
			qib_dev_err(dd, "Failed to initialize after reset, "
				    "unusable\n");
			ret = 0;
			goto bail;
		}
	}

	dd->flags |= QIB_PRESENT; /* it's back */

	if (msix_entries) {
		/* restore the MSIx vector address and data if saved above */
		for (i = 0; i < msix_entries; i++) {
			dd->cspec->msix_entries[i].entry = i;
			if (!msix_vecsave || !msix_vecsave[2 * i])
				continue;
			qib_write_kreg(dd, 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
				msix_vecsave[2 * i]);
			qib_write_kreg(dd, 1 + 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
				msix_vecsave[1 + 2 * i]);
		}
	}

	/* initialize the remaining registers. */
	for (i = 0; i < dd->num_pports; ++i)
		write_7322_init_portregs(&dd->pport[i]);
	write_7322_initregs(dd);

	if (qib_pcie_params(dd, dd->lbus_width,
			    &dd->cspec->num_msix_entries,
			    dd->cspec->msix_entries))
		qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
			    "continuing anyway\n");
	qib_setup_7322_interrupt(dd, 1);

	for (i = 0; i < dd->num_pports; ++i) {
		struct qib_pportdata *ppd = &dd->pport[i];

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

bail:
	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
	kfree(msix_vecsave);
	return ret;
}
/**
 * qib_7322_put_tid - write a TID to the chip
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: 0 for eager, 1 for expected
 * @pa: physical address of in memory buffer; tidinvalid if freeing
 */
static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	if (!(dd->flags & QIB_PRESENT))
		return;
	if (pa != dd->tidinvalid) {
		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;

		/* paranoia checks */
		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
				    pa);
			return;
		}
		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
			qib_dev_err(dd, "Physical page address 0x%lx "
				"larger than supported\n", pa);
			return;
		}

		if (type == RCVHQ_RCV_TYPE_EAGER)
			chippa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			chippa |= IBA7322_TID_SZ_4K;
		pa = chippa;
	}
	writeq(pa, tidptr);
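	/* mmiowb() keeps this MMIO write ordered ahead of any later
	 * spinlock release on weakly-ordered platforms */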
	mmiowb();
}
/**
 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @rcd: the ctxt
 *
 * clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close().
 */
static void qib_7322_clear_tids(struct qib_devdata *dd,
				struct qib_ctxtdata *rcd)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	u32 ctxt;
	int i;

	if (!dd->kregbase || !rcd)
		return;

	ctxt = rcd->ctxt;

	tidinv = dd->tidinvalid;
	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->rcvtidcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				 tidinv);

	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));

	for (i = 0; i < rcd->rcvegrcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				 tidinv);
}
/**
 * qib_7322_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void qib_7322_tidtemplate(struct qib_devdata *dd)
{
	/*
	 * For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets. We may want a module parameter to
	 * specify 2KB or 4KB and/or make it per port instead of per device
	 * for those who want to reduce memory footprint. Note that the
	 * rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
	if (dd->rcvegrbufsize == 2048)
		dd->tidtemplate = IBA7322_TID_SZ_2K;
	else if (dd->rcvegrbufsize == 4096)
		dd->tidtemplate = IBA7322_TID_SZ_4K;
	dd->tidinvalid = 0;
}
/**
 * qib_7322_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
				  struct qib_base_info *kinfo)
{
	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;

	if (rcd->dd->cspec->r1)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;

	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;

	return 0;
}
static struct qib_message_header *
qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = qib_hdrget_offset(rhf_addr);

	return (struct qib_message_header *)
		(rhf_addr - dd->rhf_offset + offset);
}
/*
 * Configure number of contexts.
 */
static void qib_7322_config_ctxts(struct qib_devdata *dd)
{
	unsigned long flags;
	u32 nchipctxts;

	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
	dd->cspec->numctxts = nchipctxts;
	if (qib_n_krcv_queues > 1 && dd->num_pports) {
		dd->first_user_ctxt = NUM_IB_PORTS +
			(qib_n_krcv_queues - 1) * dd->num_pports;
		if (dd->first_user_ctxt > nchipctxts)
			dd->first_user_ctxt = nchipctxts;
		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
	} else {
		dd->first_user_ctxt = NUM_IB_PORTS;
		dd->n_krcv_queues = 1;
	}
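	/*
	 * Example (assuming NUM_IB_PORTS == 2): on a dual-port board with
	 * qib_n_krcv_queues == 2, first_user_ctxt = 2 + (2 - 1) * 2 = 4,
	 * so contexts 0-3 are kernel receive queues and user contexts
	 * start at 4.
	 */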
	if (!qib_cfgctxts) {
		int nctxts = dd->first_user_ctxt + num_online_cpus();

		if (nctxts <= 6)
			dd->ctxtcnt = 6;
		else if (nctxts <= 10)
			dd->ctxtcnt = 10;
		else if (nctxts <= nchipctxts)
			dd->ctxtcnt = nchipctxts;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->ctxtcnt = dd->num_pports;
	else if (qib_cfgctxts <= nchipctxts)
		dd->ctxtcnt = qib_cfgctxts;
	if (!dd->ctxtcnt) /* none of the above, set to max */
		dd->ctxtcnt = nchipctxts;

	/*
	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
	 * Lock to be paranoid about later motion, etc.
	 */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	if (dd->ctxtcnt > 10)
		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
	else if (dd->ctxtcnt > 6)
		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
	/* else configure for default 6 receive ctxts */

	/* The XRC opcode is 5. */
	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);

	/*
	 * RcvCtrl *must* be written here so that the
	 * chip understands how to change rcvegrcnt below.
	 */
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* kr_rcvegrcnt changes based on the number of contexts enabled */
	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
	if (qib_rcvhdrcnt)
		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
	else
		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
				    dd->num_pports > 1 ? 1024U : 2048U);
}
static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
{
	int lsb, ret = 0;
	u64 maskr; /* right-justified mask */

	switch (which) {
	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
		ret = ppd->link_width_enabled;
		goto done;
	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
		ret = ppd->link_width_active;
		goto done;
	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
		ret = ppd->link_speed_enabled;
		goto done;
	case QIB_IB_CFG_SPD: /* Get current Link spd */
		ret = ppd->link_speed_active;
		goto done;
	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;
	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;
	case QIB_IB_CFG_LINKLATENCY:
		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
		goto done;
	case QIB_IB_CFG_OP_VLS:
		ret = ppd->vls_operational;
		goto done;
	case QIB_IB_CFG_VL_HIGH_CAP:
		ret = 16;
		goto done;
	case QIB_IB_CFG_VL_LOW_CAP:
		ret = 16;
		goto done;
	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				OverrunThreshold);
		goto done;
	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				PhyerrThreshold);
		goto done;
	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		ret = (ppd->cpspec->ibcctrl_a &
		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
		goto done;
	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;
	case QIB_IB_CFG_PMA_TICKS:
		/*
		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
		 */
		if (ppd->link_speed_active == QIB_IB_QDR)
			ret = 3;
		else if (ppd->link_speed_active == QIB_IB_DDR)
			ret = 1;
		else
			ret = 0;
		goto done;
	default:
		ret = -EINVAL;
		goto done;
	}
	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
done:
	return ret;
}
/*
 * Below again cribbed liberally from older version. Do not lean
 * heavily on it.
 */
#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
{
	struct qib_devdata *dd = ppd->dd;
	u64 maskr; /* right-justified mask */
	int lsb, ret = 0;
	u16 lcmd, licmd;
	unsigned long flags;

	switch (which) {
	case QIB_IB_CFG_LIDLMC:
		/*
		 * Set LID and LMC. Combined to avoid a possible hazard;
		 * the caller puts the LMC in the 16 MSbits, and the DLID
		 * in the 16 LSbits of val.
		 */
		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
		maskr = IBA7322_IBC_DLIDLMC_MASK;
		/*
		 * For header-checking, the SLID in the packet will
		 * be masked with SendIBSLMCMask, and compared
		 * with SendIBSLIDAssignMask. Make sure we do not
		 * set any bits not covered by the mask, or we get
		 * false-positives.
		 */
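		/*
		 * Note: val & (val >> 16) ANDs the 16-bit LID with the
		 * LMC mask from the upper half of val, so the assigned
		 * SLID can never carry bits outside the compare mask.
		 */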
		qib_write_kreg_port(ppd, krp_sendslid,
				    val & (val >> 16) & SendIBSLIDAssignMask);
		qib_write_kreg_port(ppd, krp_sendslidmask,
				    (val >> 16) & SendIBSLMCMask);
		break;
	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val;
		/* convert IB value to chip register value */
		if (val == IB_WIDTH_1X)
			val = 0;
		else if (val == IB_WIDTH_4X)
			val = 1;
		else
			val = 3;
		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
		break;
	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
		/*
		 * As with width, only write the actual register if the
		 * link is currently down, otherwise takes effect on next
		 * link change. Since setting is being explicitly requested
		 * (via MAD or sysfs), clear autoneg failure status if speed
		 * autoneg is enabled.
		 */
		ppd->link_speed_enabled = val;
		val <<= IBA7322_IBC_SPEED_LSB;
		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
			IBA7322_IBC_MAX_SPEED_MASK;
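		/*
		 * val & (val - 1) is nonzero exactly when more than one
		 * speed bit is set, i.e. multiple speeds are enabled.
		 */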
		if (val & (val - 1)) {
			/* Multiple speeds enabled */
			val |= IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		} else if (val & IBA7322_IBC_SPEED_QDR)
			val |= IBA7322_IBC_IBTA_1_2_MASK;
		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
		break;
	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;
	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;
	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  OverrunThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;
	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  PhyerrThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;
	case QIB_IB_CFG_PKEYS: /* update pkeys */
		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
		goto bail;
	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		if (val == IB_LINKINITCMD_POLL)
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		else /* SLEEP */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;
	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
		/*
		 * Update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
		 * Set even if it's unchanged, print debug message only
		 * on changes.
		 */
		val = (ppd->ibmaxlen >> 2) + 1;
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
		ppd->cpspec->ibcctrl_a |= (u64)val <<
			SYM_LSB(IBCCtrlA_0, MaxPktLen);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;
	case QIB_IB_CFG_LSTATE: /* set the IB link state */
		switch (val & 0xffff0000) {
		case IB_LINKCMD_DOWN:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
			ppd->cpspec->ibmalfusesnap = 1;
			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
				crp_errlink);
			if (!ppd->cpspec->ibdeltainprog &&
			    qib_compat_ddr_negotiate) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap =
					read_7322_creg32_port(ppd,
							      crp_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap =
					read_7322_creg32_port(ppd,
						      crp_iblinkerrrecov);
			}
			break;
		case IB_LINKCMD_ARMED:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
			if (ppd->cpspec->ibmalfusesnap) {
				ppd->cpspec->ibmalfusesnap = 0;
				ppd->cpspec->ibmalfdelta +=
					read_7322_creg32_port(ppd,
							      crp_errlink) -
					ppd->cpspec->ibmalfsnap;
			}
			break;
		case IB_LINKCMD_ACTIVE:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
			break;
		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
			goto bail;
		}
		switch (val & 0xffff) {
		case IB_LINKINITCMD_NOP:
			licmd = 0;
			break;
		case IB_LINKINITCMD_POLL:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
			break;
		case IB_LINKINITCMD_SLEEP:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
			break;
		case IB_LINKINITCMD_DISABLE:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
			ppd->cpspec->chase_end = 0;
			/*
			 * Stop state chase counter and timer, if running.
			 * Wait for pending timer, but don't clear .data (ppd)!
			 */
			if (ppd->cpspec->chase_timer.expires) {
				del_timer_sync(&ppd->cpspec->chase_timer);
				ppd->cpspec->chase_timer.expires = 0;
			}
			break;
		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
				    val & 0xffff);
			goto bail;
		}
		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
		goto bail;
	case QIB_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			set_vls(ppd);
		}
		goto bail;
	case QIB_IB_CFG_VL_HIGH_LIMIT:
		qib_write_kreg_port(ppd, krp_highprio_limit, val);
		goto bail;
	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
		if (val > 3) {
			ret = -EINVAL;
			goto bail;
		}
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;
	case QIB_IB_CFG_PORT:
		/* val is the port number of the switch we are connected to. */
		if (ppd->dd->cspec->r1) {
			cancel_delayed_work(&ppd->cpspec->ipg_work);
			ppd->cpspec->ipg_tries = 0;
		}
		goto bail;
	default:
		ret = -EINVAL;
		goto bail;
	}
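	/*
	 * Common exit for the cases that fall through: update the
	 * selected field in the ibcctrl_b shadow, write it to the chip,
	 * then touch the scratch register (the driver's usual idiom to
	 * flush the preceding write).
	 */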
	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(dd, kr_scratch, 0);
bail:
	return ret;
}

static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
{
	int ret = 0;
	u64 val, ctrlb;

	/* only IBC loopback, may add serdes and xgxs loopbacks later */
	if (!strncmp(what, "ibc", 3)) {
		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
						   Loopback);
		val = 0; /* disable heart beat, so link will come up */
		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
			    ppd->dd->unit, ppd->port);
	} else if (!strncmp(what, "off", 3)) {
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
						    Loopback);
		/* enable heart beat again */
		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
			    "(normal)\n", ppd->dd->unit, ppd->port);
	} else
		ret = -EINVAL;
	if (!ret) {
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
					     << IBA7322_IBC_HRTBT_LSB);
		ppd->cpspec->ibcctrl_b = ctrlb | val;
		qib_write_kreg_port(ppd, krp_ibcctrl_b,
				    ppd->cpspec->ibcctrl_b);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
	}
	return ret;
}

static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u32 val = qib_read_kreg_port(ppd, regno);

		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
			SYM_RMASK(LowPriority0_0, VirtualLane);
		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
			SYM_RMASK(LowPriority0_0, Weight);
	}
}

static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u64 val;

		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
			SYM_LSB(LowPriority0_0, VirtualLane)) |
		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
			SYM_LSB(LowPriority0_0, Weight));
		qib_write_kreg_port(ppd, regno, val);
	}
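	/*
	 * If the IB VL arbiter isn't already enabled, enable it now;
	 * p_sendctrl is a shared shadow register, so update it under
	 * sendctrl_lock.
	 */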
	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
		struct qib_devdata *dd = ppd->dd;
		unsigned long flags;

		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	}
}

static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	switch (which) {
	case QIB_IB_TBL_VL_HIGH_ARB:
		get_vl_weights(ppd, krp_highprio_0, t);
		break;
	case QIB_IB_TBL_VL_LOW_ARB:
		get_vl_weights(ppd, krp_lowprio_0, t);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	switch (which) {
	case QIB_IB_TBL_VL_HIGH_ARB:
		set_vl_weights(ppd, krp_highprio_0, t);
		break;
	case QIB_IB_TBL_VL_LOW_ARB:
		set_vl_weights(ppd, krp_lowprio_0, t);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	/*
	 * Need to write timeout register before updating rcvhdrhead to ensure
	 * that the timer is enabled on reception of a packet.
	 */
	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
		adjust_rcv_timeout(rcd, npkts);
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
}

static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
{
	u32 head, tail;

	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	if (rcd->rcvhdrtail_kvaddr)
		tail = qib_get_rcvhdrtail(rcd);
	else
		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
	return head == tail;
}

#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
	QIB_RCVCTRL_CTXT_DIS | \
	QIB_RCVCTRL_TIDFLOW_ENB | \
	QIB_RCVCTRL_TIDFLOW_DIS | \
	QIB_RCVCTRL_TAILUPD_ENB | \
	QIB_RCVCTRL_TAILUPD_DIS | \
	QIB_RCVCTRL_INTRAVAIL_ENB | \
	QIB_RCVCTRL_INTRAVAIL_DIS | \
	QIB_RCVCTRL_BP_ENB | \
	QIB_RCVCTRL_BP_DIS)

#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
	QIB_RCVCTRL_CTXT_DIS | \
	QIB_RCVCTRL_PKEY_DIS | \
	QIB_RCVCTRL_PKEY_ENB)

/*
 * Modify the RCVCTRL register in chip-specific way. This
 * is a function because bit positions and (future) register
 * location are chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	u64 mask, val;
	unsigned long flags;

	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);

	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
	if (op & QIB_RCVCTRL_PKEY_ENB)
		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
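	/* a negative ctxt selects all contexts; otherwise just the one */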
	if (ctxt < 0) {
		mask = (1ULL << dd->ctxtcnt) - 1;
		rcd = NULL;
	} else {
		mask = (1ULL << ctxt);
		rcd = dd->rcd[ctxt];
	}
	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
		ppd->p_rcvctrl |=
			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
		}
		/* Write these registers before the context is enabled. */
		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
				    rcd->rcvhdrqtailaddr_phys);
		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
				    rcd->rcvhdrq_phys);
		rcd->seq_cnt = 1;
	}
	if (op & QIB_RCVCTRL_CTXT_DIS)
		ppd->p_rcvctrl &=
			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
	if (op & QIB_RCVCTRL_BP_ENB)
		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
	if (op & QIB_RCVCTRL_BP_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
	/*
	 * Decide which registers to write depending on the ops enabled.
	 * Special case is "flush" (no bits set at all)
	 * which needs to write both.
	 */
	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	if (op == 0 || (op & RCVCTRL_PORT_MODS))
		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
		/*
		 * Init the context registers also; if we were
		 * disabled, tail and head should both be zero
		 * already from the enable, but since we don't
		 * know, we have to do it explicitly.
		 */
		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);

		/* be sure enabling write seen; hd/tl should be 0 */
		(void) qib_read_kreg32(dd, kr_scratch);
		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
		dd->rcd[ctxt]->head = val;
		/* If kctxt, interrupt on next receive. */
		if (ctxt < dd->first_user_ctxt)
			val |= dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
		   dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
		/* arm rcv interrupt */
		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS) {
		unsigned f;

		/* Now that the context is disabled, clear these registers. */
		if (ctxt >= 0) {
			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
				qib_write_ureg(dd, ur_rcvflowtable + f,
					       TIDFLOW_ERRBITS, ctxt);
		} else {
			unsigned i;

			for (i = 0; i < dd->cfgctxts; i++) {
				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
						    i, 0);
				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
					qib_write_ureg(dd, ur_rcvflowtable + f,
						       TIDFLOW_ERRBITS, i);
			}
		}
	}
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}

/*
 * Modify the SENDCTRL register in chip-specific way. This
 * is a function where there are multiple such registers with
 * slightly different layouts.
 * The chip doesn't allow back-to-back sendctrl writes, so write
 * the scratch register after writing sendctrl.
 *
 * Which register is written depends on the operation.
 * Most operate on the common register, while
 * SEND_ENB and SEND_DIS operate on the per-port ones.
 * SEND_ENB is included in common because it can change SPCL_TRIG
 */
#define SENDCTRL_COMMON_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_AVAIL_DIS | \
	QIB_SENDCTRL_AVAIL_ENB | \
	QIB_SENDCTRL_AVAIL_BLIP | \
	QIB_SENDCTRL_DISARM | \
	QIB_SENDCTRL_DISARM_ALL | \
	QIB_SENDCTRL_SEND_ENB)

#define SENDCTRL_PORT_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_SEND_ENB | \
	QIB_SENDCTRL_SEND_DIS | \
	QIB_SENDCTRL_FLUSH)

static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 tmp_dd_sendctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	/* First the dd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_CLEAR)
		dd->sendctrl = 0;
	if (op & QIB_SENDCTRL_AVAIL_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
		if (dd->flags & QIB_USE_SPCL_TRIG)
			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
	}

	/* Then the ppd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_SEND_DIS)
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
	else if (op & QIB_SENDCTRL_SEND_ENB)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);

	if (op & QIB_SENDCTRL_DISARM_ALL) {
		u32 i, last;

		tmp_dd_sendctrl = dd->sendctrl;
		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
		/*
		 * Disarm any buffers that are not yet launched,
		 * disabling updates until done.
		 */
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
		for (i = 0; i < last; i++) {
			qib_write_kreg(dd, kr_sendctrl,
				       tmp_dd_sendctrl |
				       SYM_MASK(SendCtrl, Disarm) | i);
			qib_write_kreg(dd, kr_scratch, 0);
		}
	}

	if (op & QIB_SENDCTRL_FLUSH) {
		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;

		/*
		 * Now drain all the fifos. The Abort bit should never be
		 * needed, so for now, at least, we don't use it.
		 */
		tmp_ppd_sendctrl |=
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
			SYM_MASK(SendCtrl_0, TxeBypassIbc);
		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	tmp_dd_sendctrl = dd->sendctrl;

	if (op & QIB_SENDCTRL_DISARM)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
			 SYM_LSB(SendCtrl, DisarmSendBuf));
	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);

	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

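	/*
	 * For a blip, the shadow copy still has SendBufAvailUpd set, so
	 * rewriting dd->sendctrl here re-enables the updates that the
	 * temporary value above turned off.
	 */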
	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	if (op & QIB_SENDCTRL_FLUSH) {
		u32 v;

		/*
		 * ensure writes have hit chip, then do a few
		 * more reads, to allow DMA of pioavail registers
		 * to occur, so in-memory copy is in sync with
		 * the chip. Not always safe to sleep.
		 */
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		qib_read_kreg32(dd, kr_scratch);
	}
}

#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */

/**
 * qib_portcntr_7322 - read a per-port chip counter
 * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
 */
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
{
	struct qib_devdata *dd = ppd->dd;
	u64 ret = 0ULL;
	u16 creg;
	/* 0xffff for unimplemented or synthesized counters */
	static const u32 xlator[] = {
		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
		[QIBPORTCNTR_ERRLINK] = crp_errlink,
		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
		/*
		 * the next 3 aren't really counters, but were implemented
		 * as counters in older chips, so still get accessed as
		 * though they were counters from this code.
		 */
		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
		[QIBPORTCNTR_PSSTART] = krp_psstart,
		[QIBPORTCNTR_PSSTAT] = krp_psstat,
		/* pseudo-counter, summed for all ports */
		[QIBPORTCNTR_KHDROVFL] = 0xffff,
	};

	if (reg >= ARRAY_SIZE(xlator)) {
		qib_devinfo(ppd->dd->pcidev,
			    "Unimplemented portcounter %u\n", reg);
		goto done;
	}
	creg = xlator[reg] & _PORT_CNTR_IDXMASK;

	/* handle non-counters and special cases first */
	if (reg == QIBPORTCNTR_KHDROVFL) {
		int i;

		/* sum over all kernel contexts (skip if mini_init) */
		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
			struct qib_ctxtdata *rcd = dd->rcd[i];

			if (!rcd || rcd->ppd != ppd)
				continue;
			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
		}
		goto done;
	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
		/*
		 * Used as part of the synthesis of port_rcv_errors
		 * in the verbs code for IBTA counters. Not needed for 7322,
		 * because all the errors are already counted by other cntrs.
		 */
		goto done;
	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
		/* were counters in older chips, now per-port kernel regs */
		ret = qib_read_kreg_port(ppd, creg);
		goto done;
	}

	/*
	 * Only fast increment counters are 64 bits; use 32 bit reads to
	 * avoid two independent reads when on Opteron.
	 */
	if (xlator[reg] & _PORT_64BIT_FLAG)
		ret = read_7322_creg_port(ppd, creg);
	else
		ret = read_7322_creg32_port(ppd, creg);
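	/*
	 * While a delta measurement is in progress, freeze the reported
	 * value at its snapshot: ret -= ret - snap is just ret = snap.
	 */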
	if (creg == crp_ibsymbolerr) {
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->ibsymsnap;
		ret -= ppd->cpspec->ibsymdelta;
	} else if (creg == crp_iblinkerrrecov) {
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->iblnkerrsnap;
		ret -= ppd->cpspec->iblnkerrdelta;
	} else if (creg == crp_errlink)
		ret -= ppd->cpspec->ibmalfdelta;
	else if (creg == crp_iblinkdown)
		ret += ppd->cpspec->iblnkdowndelta;
done:
	return ret;
}

/*
 * Device counter names (not port-specific), one line per stat,
 * single string. Used by utilities like ipathstats to print the stats
 * in a way which works for different versions of drivers, without changing
 * the utility. Names need to be 12 chars or less (w/o newline), for proper
 * display by utility.
 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
 * The EgrOvfl list needs to be last so we truncate them at the configured
 * context count for the device.
 * cntr7322indices contains the corresponding register indices.
 */
static const char cntr7322names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n"
	"RxTIDInvalid\n"
	"RxTIDFloDrop\n" /* 7322 only */
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n"
	"Ctxt4EgrOvfl\n"
	"Ctxt5EgrOvfl\n"
	"Ctxt6EgrOvfl\n"
	"Ctxt7EgrOvfl\n"
	"Ctxt8EgrOvfl\n"
	"Ctxt9EgrOvfl\n"
	"Ctx10EgrOvfl\n"
	"Ctx11EgrOvfl\n"
	"Ctx12EgrOvfl\n"
	"Ctx13EgrOvfl\n"
	"Ctx14EgrOvfl\n"
	"Ctx15EgrOvfl\n"
	"Ctx16EgrOvfl\n"
	"Ctx17EgrOvfl\n"
	;

static const u32 cntr7322indices[] = {
	cr_lbint | _PORT_64BIT_FLAG,
	cr_lbstall | _PORT_64BIT_FLAG,
	cr_tidfull,
	cr_tidinvalid,
	cr_rxtidflowdrop,
	cr_base_egrovfl + 0,
	cr_base_egrovfl + 1,
	cr_base_egrovfl + 2,
	cr_base_egrovfl + 3,
	cr_base_egrovfl + 4,
	cr_base_egrovfl + 5,
	cr_base_egrovfl + 6,
	cr_base_egrovfl + 7,
	cr_base_egrovfl + 8,
	cr_base_egrovfl + 9,
	cr_base_egrovfl + 10,
	cr_base_egrovfl + 11,
	cr_base_egrovfl + 12,
	cr_base_egrovfl + 13,
	cr_base_egrovfl + 14,
	cr_base_egrovfl + 15,
	cr_base_egrovfl + 16,
	cr_base_egrovfl + 17,
};

/*
 * same as cntr7322names and cntr7322indices, but for port-specific counters.
 * portcntr7322indices is somewhat complicated by some registers needing
 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
 */
static const char portcntr7322names[] =
	"TxPkt\n"
	"TxFlowPkt\n"
	"TxWords\n"
	"RxPkt\n"
	"RxFlowPkt\n"
	"RxWords\n"
	"TxFlowStall\n"
	"TxDmaDesc\n"  /* 7220 and 7322-only */
	"E RxDlidFltr\n"  /* 7220 and 7322-only */
	"IBStatusChng\n"
	"IBLinkDown\n"
	"IBLnkRecov\n"
	"IBRxLinkErr\n"
	"IBSymbolErr\n"
	"RxLLIErr\n"
	"RxBadFormat\n"
	"RxBadLen\n"
	"RxBufOvrfl\n"
	"RxEBP\n"
	"RxFlowCtlErr\n"
	"RxICRCerr\n"
	"RxLPCRCerr\n"
	"RxVCRCerr\n"
	"RxInvalLen\n"
	"RxInvalPKey\n"
	"RxPktDropped\n"
	"TxBadLength\n"
	"TxDropped\n"
	"TxInvalLen\n"
	"TxUnderrun\n"
	"TxUnsupVL\n"
	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
	"RxVL15Drop\n"
	"RxVlErr\n"
	"XcessBufOvfl\n"
	"RxQPBadCtxt\n" /* 7322-only from here down */
	"TXBadHeader\n"
	;

static const u32 portcntr7322indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	crp_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	crp_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	crp_txsdmadesc | _PORT_64BIT_FLAG,
	crp_rxdlidfltr,
	crp_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	crp_rcvflowctrlviol,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	crp_txminmaxlenerr,
	crp_txdroppedpkt,
	crp_txlenerr,
	crp_txunderrun,
	crp_txunsupvl,
	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
	crp_rxqpinvalidctxt,
	crp_txhdrerr,
};

/* do all the setup to make the counter reads efficient later */
static void init_7322_cntrnames(struct qib_devdata *dd)
{
	int i, j = 0;
	char *s;
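	/*
	 * Count the device counter names, but stop once the walk gets
	 * past the per-context EgrOvfl entries for all configured
	 * contexts (j counts names from the first EgrOvfl entry on).
	 */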
	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
	     i++) {
		/* we always have at least one counter before the egrovfl */
		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
			j = 1;
		s = strchr(s + 1, '\n');
		if (s && j)
			j++;
	}
	dd->cspec->ncntrs = i;
	if (!s)
		/* full list; size is without terminating null */
		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
	else
		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
		* sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	for (i = 0, s = (char *)portcntr7322names; s; i++)
		s = strchr(s + 1, '\n');
	dd->cspec->nportcntrs = i - 1;
	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
	for (i = 0; i < dd->num_pports; ++i) {
		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
			* sizeof(u64), GFP_KERNEL);
		if (!dd->pport[i].cpspec->portcntrs)
			qib_dev_err(dd, "Failed allocation for"
				    " portcounters\n");
	}
}

static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
			      u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->cntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *) cntr7322names;
	} else {
		u64 *cntr = dd->cspec->cntrs;
		int i;

		ret = dd->cspec->ncntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->ncntrs; i++)
			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg(dd,
							 cntr7322indices[i] &
							 _PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32(dd,
							   cntr7322indices[i]);
	}
done:
	return ret;
}

static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
				  char **namep, u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->portcntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *)portcntr7322names;
	} else {
		struct qib_pportdata *ppd = &dd->pport[port];
		u64 *cntr = ppd->cpspec->portcntrs;
		int i;

		ret = dd->cspec->nportcntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->nportcntrs; i++) {
			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
				*cntr++ = qib_portcntr_7322(ppd,
					portcntr7322indices[i] &
					_PORT_CNTR_IDXMASK);
			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg_port(ppd,
					portcntr7322indices[i] &
					_PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32_port(ppd,
					portcntr7322indices[i]);
		}
	}
done:
	return ret;
}

/**
 * qib_get_7322_faststats - get word counters from chip before they overflow
 * @opaque - contains a pointer to the qlogic_ib device qib_devdata
 *
 * VESTIGIAL: IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have, yet, for 7322-based boards.
 *
 * called from add_timer
 */
static void qib_get_7322_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd;
	unsigned long flags;
	u64 traffic_wds;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/*
		 * If the port isn't enabled, or isn't operational, or if
		 * diags is running (which can cause memory diags to fail),
		 * skip this port this time.
		 */
		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
		    || dd->diag_client)
			continue;

		/*
		 * Maintain an activity timer, based on traffic
		 * exceeding a threshold, so we need to check the word-counts
		 * even if they are 64-bit.
		 */
		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
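		/*
		 * traffic_wds starts as the cumulative rx+tx word count;
		 * subtracting the saved total leaves the words moved since
		 * the last poll, which is then folded back into the running
		 * sum under eep_st_lock.
		 */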
		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
		traffic_wds -= ppd->dd->traffic_wds;
		ppd->dd->traffic_wds += traffic_wds;
		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
						QIB_IB_QDR) &&
		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
				    QIBL_LINKACTIVE)) &&
		    ppd->cpspec->qdr_dfe_time &&
		    time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
			ppd->cpspec->qdr_dfe_on = 0;

			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_INIT_R1 :
					    QDR_STATIC_ADAPT_INIT);
			force_h1(ppd);
		}
	}
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}

/*
 * If we were using MSIx, try to fall back to INTx.
 */
static int qib_7322_intr_fallback(struct qib_devdata *dd)
{
	if (!dd->cspec->num_msix_entries)
		return 0; /* already using INTx */

	qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
		    " trying INTx interrupts\n");
	qib_7322_nomsix(dd);
	qib_enable_intx(dd->pcidev);
	qib_setup_7322_interrupt(dd, 0);
	return 1;
}

/*
 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining. To do this right, we reset IBC
 * as well, then return to previous state (which may be still in reset)
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
 * check all callers.
 */
static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
{
	u64 val;
	struct qib_devdata *dd = ppd->dd;
	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
		SYM_MASK(IBPCSConfig_0, xcv_treset) |
		SYM_MASK(IBPCSConfig_0, tx_rx_reset);

	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
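	/*
	 * Mask statusValidNoEop hardware errors around the reset
	 * (apparently the reset can latch them spuriously); they are
	 * cleared and unmasked again at the end of this function.
	 */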
	qib_write_kreg(dd, kr_hwerrmask,
		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
	qib_write_kreg_port(ppd, krp_ibcctrl_a,
			    ppd->cpspec->ibcctrl_a &
			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));

	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	qib_write_kreg(dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, statusValidNoEopClear));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
}

/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware. It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum, control, len;
	struct qib_devdata *dd = ppd->dd;

	i = 0;
	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
	control = qib_7322_setpbc_control(ppd, len, 0, 15);
	pbc = ((u64) control << 32) | len;
	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
		if (i++ > 15)
			return;
		udelay(2);
	}
	/* disable header check on this packet, since it can't be valid */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
	writeq(pbc, piobuf);
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, hdr, 7);
	qib_pio_copy(piobuf + 9, data, dcnt);
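	/*
	 * On chips using the special send trigger, writing the magic
	 * word at the buffer's trigger offset is what actually launches
	 * the packet.
	 */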
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_flush_wc();
	qib_sendbuf_done(dd, pnum);
	/* and re-enable hdr check */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
}

/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
	};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
	};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}

/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change. The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down)
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When link has gone down, and autoneg enabled, or autoneg has
 * failed and we give up until next time we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
 */
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	u64 newctrlb;

	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK);

	if (speed & (speed - 1)) /* multiple speeds */
		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK;
	else
		newctrlb |= speed == QIB_IB_QDR ?
			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
			((speed == QIB_IB_DDR ?
			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));

	if (newctrlb == ppd->cpspec->ibcctrl_b)
		return;

	ppd->cpspec->ibcctrl_b = newctrlb;
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}

/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT
 */
static void try_7322_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	qib_autoneg_7322_send(ppd, 0);
	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
	qib_7322_mini_pcs_reset(ppd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}

/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = container_of(work, struct qib_chippport_specific,
			   autoneg_work.work)->ppd;
	dd = ppd->dd;

	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
		     == IB_7322_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
			   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			   msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			ppd->cpspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}

/*
 * This routine is used to request IPG set in the QLogic switch.
 * Only called if r1.
 */
static void try_7322_ipg(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	unsigned delay;
	int ret;

	agent = ibp->send_agent;
	if (!agent)
		goto retry;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		goto retry;

	if (!ibp->smi_ah) {
		struct ib_ah_attr attr;
		struct ib_ah *ah;

		memset(&attr, 0, sizeof attr);
		attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
		attr.port_num = ppd->port;
		ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
		if (IS_ERR(ah))
			ret = -EINVAL;
		else {
			send_buf->ah = ah;
			ibp->smi_ah = to_iah(ah);
			ret = 0;
		}
	} else {
		send_buf->ah = &ibp->smi_ah->ibah;
		ret = 0;
	}

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_SEND;
	smp->hop_cnt = 1;
	smp->attr_id = QIB_VENDOR_IPG;
	smp->attr_mod = 0;

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		ib_free_send_mad(send_buf);
retry:
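	/* retry with exponential backoff, doubling the delay each try */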
	delay = 2 << ppd->cpspec->ipg_tries;
	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
			   msecs_to_jiffies(delay));
}

/*
 * Timeout handler for setting IPG.
 * Only called if r1.
 */
static void ipg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;

	ppd = container_of(work, struct qib_chippport_specific,
			   ipg_work.work)->ppd;
	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
	    && ++ppd->cpspec->ipg_tries <= 10)
		try_7322_ipg(ppd);
}

static u32 qib_7322_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);

	switch (state) {
	case IB_7322_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7322_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7322_L_STATE_ACTIVE:
		/* fall through */
	case IB_7322_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7322_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7322_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
	return qib_7322_physportstate[state];
}

static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	unsigned long flags;
	int mult;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	/* Update our picture of width and speed from chip */
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
		ppd->link_speed_active = QIB_IB_QDR;
		mult = 4;
	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
		ppd->link_speed_active = QIB_IB_DDR;
		mult = 2;
	} else {
		ppd->link_speed_active = QIB_IB_SDR;
		mult = 1;
	}
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
		ppd->link_width_active = IB_WIDTH_4X;
		mult *= 4;
	} else
		ppd->link_width_active = IB_WIDTH_1X;
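	/* speed (x1/x2/x4) times width (x1/x4) gives the IB rate multiple */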
	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];

	if (!ibup) {
		u64 clr;

		/* Link went down. */
		/* do IPG MAD again after linkdown, even if last time failed */
		ppd->cpspec->ipg_tries = 0;
		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
		if (clr)
			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			/* unlock the Tx settings, speed may change */
			qib_write_kreg_port(ppd, krp_tx_deemph_override,
				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				reset_tx_deemphasis_override));
			qib_cancel_sends(ppd);
			/* on link down, ensure sane pcs state */
			qib_7322_mini_pcs_reset(ppd);
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
		if (clr == ppd->cpspec->iblnkdownsnap)
			ppd->cpspec->iblnkdowndelta++;
	} else {
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & QIB_IB_DDR)
		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and auto-negotiation enabled */
			++ppd->cpspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymdelta +=
					read_7322_creg32_port(ppd,
						crp_ibsymbolerr) -
						ppd->cpspec->ibsymsnap;
				ppd->cpspec->iblnkerrdelta +=
					read_7322_creg32_port(ppd,
						crp_iblinkerrrecov) -
						ppd->cpspec->iblnkerrsnap;
			}
			try_7322_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			qib_autoneg_7322_send(ppd, 1);
			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
			qib_7322_mini_pcs_reset(ppd);
			udelay(2);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   (ppd->link_speed_active & QIB_IB_DDR)) {
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
					 QIBL_IB_AUTONEG_FAILED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->autoneg_tries = 0;
			/* re-enable SDR, for next link down */
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
			wake_up(&ppd->cpspec->autoneg_wait);
			symadj = 1;
		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
			/*
			 * Clear autoneg failure flag, and do setup
			 * so we'll try next time link goes down and
			 * back to INIT (possibly connected to a
			 * different device).
			 */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
			symadj = 1;
		}
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			symadj = 1;
			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
				try_7322_ipg(ppd);
			if (!ppd->cpspec->recovery_init)
				setup_7322_link_recovery(ppd, 0);
			ppd->cpspec->qdr_dfe_time = jiffies +
				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
		}
		ppd->cpspec->ibmalfusesnap = 0;
		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
								crp_errlink);
	}
	if (symadj) {
		ppd->cpspec->iblnkdownsnap =
			read_7322_creg32_port(ppd, crp_iblinkdown);
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
							       crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
							 crp_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7322_setextled(ppd, ibup);
	return ret;
}

/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
 */
static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function. We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}
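/*
 * Illustrative usage sketch (not from this driver): since gpio_7322_mod()
 * always returns the GP input pins, a read-only poll just passes an
 * all-zero mask, which skips the locked read/modify/write entirely:
 *
 *	u32 pins = gpio_7322_mod(dd, 0, 0, 0);
 *
 * Driving a pin as a low output, by contrast, sets its direction bit and
 * leaves its output bit clear, e.g. for bit 3:
 *
 *	gpio_7322_mod(dd, 0, 1 << 3, 1 << 3);
 */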
/* Enable writes to config EEPROM, if possible. Returns previous state */
static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
{
	int prev_wen;
	u32 mask;

	mask = 1 << QIB_EEPROM_WEN_NUM;
	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);

	return prev_wen & 1;
}
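/*
 * Hedged sketch of the enable/restore pattern a caller might use with
 * qib_7322_eeprom_wen(); the write-enable pin is active low, which is
 * why the function inverts the GPIO read above and writes 0 to assert it:
 *
 *	int prev = qib_7322_eeprom_wen(dd, 1);	// allow EEPROM writes
 *	// ... perform EEPROM update ...
 *	qib_7322_eeprom_wen(dd, prev);		// restore prior state
 */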
/*
 * Read fundamental info we need to use the chip. These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7322_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->palign = qib_read_kreg32(dd, kr_pagealign);

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport[0].ibmtu = (u32)mtu;
	dd->pport[1].ibmtu = (u32)mtu;

	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	dd->pio4kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase +
		 (dd->piobufbase >> 32));
	/*
	 * 4K buffers take 2 pages; we use roundup just to be
	 * paranoid; we calculate it once here, rather than on
	 * every buf allocate
	 */
	dd->align4k = ALIGN(dd->piosize4k, dd->palign);

	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;

	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
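/*
 * Worked example of the pioavregs arithmetic above (illustrative):
 * each 64-bit pioavail register carries 2 bits per send buffer, so one
 * register covers sizeof(u64) * BITS_PER_BYTE / 2 = 32 buffers, and the
 * ALIGN()-then-divide is just a round-up, equivalent to
 *
 *	pioavregs = DIV_ROUND_UP(piobufs, 32);
 *
 * e.g. with (say) 144 2k buffers, 32 4k buffers and 8 VL15 buffers,
 * piobufs = 184 and pioavregs = 6.
 */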
/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as separate function
 */
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	cregbase = qib_read_kreg32(dd, kr_counterregbase);

	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
		(char __iomem *)dd->kregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);

	/* port registers are defined as relative to base of chip */
	dd->pport[0].cpspec->kpregbase =
		(u64 __iomem *)((char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->kpregbase =
		(u64 __iomem *)(dd->palign +
		(char __iomem *)dd->kregbase);
	dd->pport[0].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
}
/*
 * This is a fairly special-purpose observer, so we only support
 * the port-specific parts of SendCtrl
 */

#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) |	\
			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))

static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op, u32 offs,
			 u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx;
	unsigned pidx;
	struct qib_pportdata *ppd = NULL;
	u64 local_data, all_bits;

	/*
	 * The fixed correspondence between Physical ports and pports is
	 * severed. We need to hunt for the ppd that corresponds
	 * to the offset we got. And we have to do that without admitting
	 * we know the stride, apparently.
	 */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		u64 __iomem *psptr;
		u32 psoffs;

		ppd = dd->pport + pidx;
		if (!ppd->cpspec->kpregbase)
			continue;

		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
		if (psoffs == offs)
			break;
	}

	/* If pport is not being managed by driver, just avoid shadows. */
	if (pidx >= dd->num_pports)
		ppd = NULL;

	/* In any case, "idx" is flat index in kreg space */
	idx = offs / sizeof(u64);

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (!ppd || (mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read. The judgement call is whether from
		 * reg or shadow. First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		if (ppd) {
			sval = ppd->p_sendctrl & ~mask;
			sval |= *data & SENDCTRL_SHADOWED & mask;
			ppd->p_sendctrl = sval;
		} else
			sval = *data & SENDCTRL_SHADOWED & mask;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_write_kreg(dd, idx, tval);
		qib_write_kreg(dd, kr_scratch, 0ULL);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	return only_32 ? 4 : 8;
}

static const struct diag_observer sendctrl_0_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
	KREG_IDX(SendCtrl_0) * sizeof(u64)
};

static const struct diag_observer sendctrl_1_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
	KREG_IDX(SendCtrl_1) * sizeof(u64)
};
static ushort sdma_fetch_prio = 8;
module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");

/* Besides logging QSFP events, we set appropriate TxDDS values */
static void init_txdds_table(struct qib_pportdata *ppd, int override);

static void qsfp_7322_event(struct work_struct *work)
{
	struct qib_qsfp_data *qd;
	struct qib_pportdata *ppd;
	u64 pwrup;
	int ret;
	u32 le2;

	qd = container_of(work, struct qib_qsfp_data, work);
	ppd = qd->ppd;
	pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);

	/*
	 * Some QSFPs not only do not respond until the full power-up
	 * time, but may behave badly if we try. So hold off responding
	 * to insertion.
	 */
	while (1) {
		u64 now = get_jiffies_64();
		if (time_after64(now, pwrup))
			break;
		msleep(20);
	}
	ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
	/*
	 * Need to change LE2 back to defaults if we couldn't
	 * read the cable type (to handle cable swaps), so do this
	 * even on failure to read cable information. We don't
	 * get here for QME, so IS_QME check not needed here.
	 */
	if (!ret && !ppd->dd->cspec->r1) {
		if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
			le2 = LE2_QME;
		else if (qd->cache.atten[1] >= qib_long_atten &&
			 QSFP_IS_CU(qd->cache.tech))
			le2 = LE2_5m;
		else
			le2 = LE2_DEFAULT;
	} else
		le2 = LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
	init_txdds_table(ppd, 0);
}
/*
 * There is little we can do but complain to the user if QSFP
 * initialization fails.
 */
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
{
	unsigned long flags;
	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
	struct qib_devdata *dd = ppd->dd;
	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;

	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
	qd->ppd = ppd;
	qib_qsfp_init(qd, qsfp_7322_event);
	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
	dd->cspec->gpio_mask |= mod_prs_bit;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}
/*
 * Called at device initialization time, and also if the txselect
 * module parameter is changed. This is used for cables that don't
 * have valid QSFP EEPROMs (not present, or attenuation is zero).
 * We initialize to the default, then if there is a specific
 * unit,port match, we use that (and set it immediately, for the
 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#"; separators must
 * be a SPACE character. A newline terminates. The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value.
 * The last specific match is used (actually, all are used, but last
 * one is the one that winds up set); if none at all, fall back on default.
 */
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
{
	char *nxt, *str;
	u32 pidx, unit, port, deflt, h1;
	unsigned long val;
	int any = 0, seth1;
	int txdds_size;

	str = txselect_list;

	/* default number is validated in setup_txselect() */
	deflt = simple_strtoul(str, &nxt, 0);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->pport[pidx].cpspec->no_eep = deflt;

	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
	if (IS_QME(dd) || IS_QMH(dd))
		txdds_size += TXDDS_MFG_SZ;

	while (*nxt && nxt[1]) {
		str = ++nxt;
		unit = simple_strtoul(str, &nxt, 0);
		if (nxt == str || !*nxt || *nxt != ',') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		port = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != '=') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		val = simple_strtoul(str, &nxt, 0);
		if (nxt == str) {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		if (val >= txdds_size)
			continue;
		seth1 = 0;
		h1 = 0; /* gcc thinks it might be used uninitted */
		if (*nxt == ',' && nxt[1]) {
			str = ++nxt;
			h1 = (u32)simple_strtoul(str, &nxt, 0);
			if (nxt == str)
				while (*nxt && *nxt++ != ' ') /* skip */
					;
			else
				seth1 = 1;
		}
		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
		     ++pidx) {
			struct qib_pportdata *ppd = &dd->pport[pidx];

			if (ppd->port != port || !ppd->link_speed_supported)
				continue;
			ppd->cpspec->no_eep = val;
			if (seth1)
				ppd->cpspec->h1_val = h1;
			/* now change the IBC and serdes, overriding generic */
			init_txdds_table(ppd, 1);
			/* Re-enable the physical state machine on mezz boards
			 * now that the correct settings have been set. */
			if (IS_QMH(dd) || IS_QME(dd))
				qib_set_ib_7322_lstate(ppd, 0,
					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			any++;
		}
		if (*nxt == '\n')
			break; /* done */
	}
	if (change && !any) {
		/* no specific setting, use the default.
		 * Change the IBC and serdes, but since it's
		 * general, don't override specific settings.
		 */
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				init_txdds_table(&dd->pport[pidx], 0);
	}
}
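/*
 * Illustrative txselect strings accepted by the parser above (values
 * invented): "5" sets index 5 as the default for all ports; "5 1,2=11"
 * additionally forces unit 1 port 2 to index 11; and "5 1,2=11,9" also
 * overrides that port's H1 equalization value to 9. Malformed tuples
 * are skipped at the next SPACE rather than aborting the whole string.
 */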
/* handle the txselect parameter changing */
static int setup_txselect(const char *str, struct kernel_param *kp)
{
	struct qib_devdata *dd;
	unsigned long val;
	char *n;

	if (strlen(str) >= MAX_ATTEN_LEN) {
		printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
		       "too long\n");
		return -ENOSPC;
	}
	val = simple_strtoul(str, &n, 0);
	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
				TXDDS_MFG_SZ)) {
		printk(KERN_INFO QIB_DRV_NAME
		       " txselect_values must start with a number < %d\n",
		       TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
		return -EINVAL;
	}
	strcpy(txselect_list, str);

	list_for_each_entry(dd, &qib_dev_list, list)
		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
			set_no_qsfp_atten(dd, 1);
	return 0;
}
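/*
 * Hedged usage sketch: setup_txselect() is the set handler for the
 * "txselect" module parameter, so the string format parsed by
 * set_no_qsfp_atten() above can be supplied at module load, e.g.
 *
 *	modprobe ib_qib txselect="5 1,1=12 1,2=13,9"
 *
 * (values invented; the leading default must stay below the table-size
 * bound checked above).
 */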
/*
 * Write the final few registers that depend on some of the
 * init setup. Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7322_initreg(struct qib_devdata *dd)
{
	int ret = 0, n;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd, "Catastrophic software error, "
			    "SendPIOAvailAddr written as %lx, "
			    "read back as %llx\n",
			    (unsigned long) dd->pioavailregs_phys,
			    (unsigned long long) val);
		ret = -EINVAL;
	}

	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);

	qib_register_observer(dd, &sendctrl_0_observer);
	qib_register_observer(dd, &sendctrl_1_observer);

	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);
	/*
	 * Set SendDmaFetchPriority and init Tx params, including
	 * QSFP handler on boards that have QSFP.
	 * First set our default attenuation entry for cables that
	 * don't have valid attenuation.
	 */
	set_no_qsfp_atten(dd, 0);
	for (n = 0; n < dd->num_pports; ++n) {
		struct qib_pportdata *ppd = dd->pport + n;

		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
				    sdma_fetch_prio & 0xf);
		/* Initialize qsfp if present on board. */
		if (dd->flags & QIB_HAS_QSFP)
			qib_init_7322_qsfp(ppd);
	}
	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);

	return ret;
}
/* per IB port errors.  */
#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
	MASK_ACROSS(8, 15))
#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
	MASK_ACROSS(0, 11))

/*
 * Write the initialization per-port registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_init_portregs(struct qib_pportdata *ppd)
{
	u64 val;
	int i;

	if (!ppd->link_speed_supported) {
		/* no buffer credits for this port */
		for (i = 1; i < 8; i++)
			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
		return;
	}

	/*
	 * Set the number of supported virtual lanes in IBC,
	 * for flow control packet handling on unsupported VLs
	 */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
	val |= (u64)(ppd->vls_supported - 1) <<
		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);

	/* enable tx header checking */
	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);

	qib_write_kreg_port(ppd, krp_ncmodectrl,
			    SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));

	/*
	 * Unconditionally clear the bufmask bits. If SDMA is
	 * enabled, we'll set them appropriately later.
	 */
	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
	if (ppd->dd->cspec->r1)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
}
/*
 * Write the initialization per-device registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c). Also write per-port
 * registers that are affected by overall device config, such as QP mapping.
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_initregs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i, pidx;
	u64 val;

	/* Set Multicast QPs received by port 2 to map to context one. */
	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		unsigned n, regno;
		unsigned long flags;

		if (dd->n_krcv_queues < 2 ||
			!dd->pport[pidx].link_speed_supported)
			continue;

		ppd = &dd->pport[pidx];

		/* be paranoid against later code motion, etc. */
		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

		/* Initialize QP to context mapping */
		regno = krp_rcvqpmaptable;
		val = 0;
		if (dd->num_pports > 1)
			n = dd->first_user_ctxt / dd->num_pports;
		else
			n = dd->first_user_ctxt - 1;
		for (i = 0; i < 32; ) {
			unsigned ctxt;

			if (dd->num_pports > 1)
				ctxt = (i % n) * dd->num_pports + pidx;
			else if (i % n)
				ctxt = (i % n) + 1;
			else
				ctxt = ppd->hw_pidx;
			val |= ctxt << (5 * (i % 6));
			i++;
			if (i % 6 == 0) {
				qib_write_kreg_port(ppd, regno, val);
				val = 0;
				regno++;
			}
		}
		qib_write_kreg_port(ppd, regno, val);
	}

	/*
	 * Set up interrupt mitigation for kernel contexts, but
	 * not user contexts (user contexts use interrupts when
	 * stalled waiting for any packet, so want those interrupts
	 * right away).
	 */
	for (i = 0; i < dd->first_user_ctxt; i++) {
		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
	}

	/*
	 * Initialize as (disabled) rcvflow tables. Application code
	 * will setup each flow as it uses the flow.
	 * Doesn't clear any of the error bits that might be set.
	 */
	val = TIDFLOW_ERRBITS; /* these are W1C */
	for (i = 0; i < dd->cfgctxts; i++) {
		int flow;

		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
	}

	/*
	 * Dual cards init to dual port recovery, single port cards to
	 * the one port. Dual port cards may later adjust to 1 port,
	 * and then back to dual port if both ports are connected.
	 */
	if (dd->num_pports)
		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
}
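/*
 * Worked example of the QP-to-context map built above (illustrative
 * numbers): each 64-bit RcvQPMapTable register holds six 5-bit context
 * entries. With two ports and first_user_ctxt = 6, n = 3, so port 0
 * cycles through kernel contexts 0, 2, 4, 0, 2, 4, ... and port 1
 * through 1, 3, 5, ..., spreading received QPs round-robin across each
 * port's kernel receive contexts.
 */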
static int qib_init_7322_variables(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned features, pidx, sbufcnt;
	int ret, mtu;
	u32 sbufs, updthresh;

	/* pport structs are contiguous, allocated after devdata */
	ppd = (struct qib_pportdata *)(dd + 1);
	dd->pport = ppd;
	ppd[0].dd = dd;
	ppd[1].dd = dd;

	dd->cspec = (struct qib_chip_specific *)(ppd + 2);

	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
	ppd[1].cpspec = &ppd[0].cpspec[1];
	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */

	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd, "Revision register read failure, "
			    "giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;  /* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
	dd->cspec->r1 = dd->minrev == 1;

	get_7322_chip_params(dd);
	features = qib_7322_boardname(dd);

	/* now that piobcnt2k and 4k set, we can allocate these */
	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
		NUM_VL15_BUFS + BITS_PER_LONG - 1;
	sbufcnt /= BITS_PER_LONG;
	dd->cspec->sendchkenable = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
	dd->cspec->sendibchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
		!dd->cspec->sendibchk) {
		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
		ret = -ENOMEM;
		goto bail;
	}

	ppd = dd->pport;

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
		QIB_HAS_THRESH_UPDATE |
		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * Setup initial values. These may change when PAT is enabled, but
	 * we need these to do initial chip register accesses.
	 */
	qib_7322_set_baseaddrs(dd);

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;

	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
	/* all hwerrors become interrupts, unless special purposed */
	dd->cspec->hwerrmask = ~0ULL;
	/* link_recovery setup causes these errors, so ignore them,
	 * other than clearing them when they occur */
	dd->cspec->hwerrmask &=
		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
		  HWE_MASK(LATriggered));

	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
		struct qib_chippport_specific *cp = ppd->cpspec;

		ppd->link_speed_supported = features & PORT_SPD_CAP;
		features >>= PORT_SPD_CAP_SHIFT;
		if (!ppd->link_speed_supported) {
			/* single port mode (7340, or configured) */
			dd->skip_kctxt_mask |= 1 << pidx;
			if (pidx == 0) {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				ppd[0] = ppd[1];
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_0)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_0));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
				     SYM_MASK(IntMask, SDmaIntMask_0) |
				     SYM_MASK(IntMask, ErrIntMask_0) |
				     SYM_MASK(IntMask, SendDoneIntMask_0));
			} else {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_1)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_1));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
				     SYM_MASK(IntMask, SDmaIntMask_1) |
				     SYM_MASK(IntMask, ErrIntMask_1) |
				     SYM_MASK(IntMask, SendDoneIntMask_1));
			}
			continue;
		}

		dd->num_pports++;
		qib_init_pportdata(ppd, dd, pidx, dd->num_pports);

		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
		ppd->link_width_enabled = IB_WIDTH_4X;
		ppd->link_speed_enabled = ppd->link_speed_supported;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->link_width_active = IB_WIDTH_4X;
		ppd->link_speed_active = QIB_IB_SDR;
		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
		switch (qib_num_cfg_vls) {
		case 1:
			ppd->vls_supported = IB_VL_VL0;
			break;
		case 2:
			ppd->vls_supported = IB_VL_VL0_1;
			break;
		default:
			qib_devinfo(dd->pcidev,
				    "Invalid num_vls %u, using 4 VLs\n",
				    qib_num_cfg_vls);
			qib_num_cfg_vls = 4;
			/* fall through */
		case 4:
			ppd->vls_supported = IB_VL_VL0_3;
			break;
		case 8:
			if (mtu <= 2048)
				ppd->vls_supported = IB_VL_VL0_7;
			else {
				qib_devinfo(dd->pcidev,
					    "Invalid num_vls %u for MTU %d, "
					    "using 4 VLs\n",
					    qib_num_cfg_vls, mtu);
				ppd->vls_supported = IB_VL_VL0_3;
				qib_num_cfg_vls = 4;
			}
			break;
		}
		ppd->vls_operational = ppd->vls_supported;

		init_waitqueue_head(&cp->autoneg_wait);
		INIT_DELAYED_WORK(&cp->autoneg_work,
				  autoneg_7322_work);
		if (ppd->dd->cspec->r1)
			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);

		/*
		 * For Mez and similar cards, no qsfp info, so do
		 * the "cable info" setup here. Can be overridden
		 * in adapter-specific routines.
		 */
		if (!(dd->flags & QIB_HAS_QSFP)) {
			if (!IS_QMH(dd) && !IS_QME(dd))
				qib_devinfo(dd->pcidev, "IB%u:%u: "
					    "Unknown mezzanine card type\n",
					    dd->unit, ppd->port);
			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
			/*
			 * Choose center value as default tx serdes setting
			 * until changed through module parameter.
			 */
			ppd->cpspec->no_eep = IS_QMH(dd) ?
				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
		} else
			cp->h1_val = H1_FORCE_VAL;

		/* Avoid writes to chip for mini_init */
		if (!qib_mini_init)
			write_7322_init_portregs(ppd);

		init_timer(&cp->chase_timer);
		cp->chase_timer.function = reenable_chase;
		cp->chase_timer.data = (unsigned long)ppd;

		ppd++;
	}

	dd->rcvhdrentsize = qib_rcvhdrentsize ?
		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = qib_rcvhdrsize ?
		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	dd->rcvegrbufsize = max(mtu, 2048);

	qib_7322_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.
	 */
	dd->rhdrhead_intr_off =
		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;

	/* setup the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7322_faststats;
	dd->stats_timer.data = (unsigned long) dd;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = dd->piosize2k >> 2;

	qib_7322_config_ctxts(dd);
	qib_set_ctxtcnt(dd);

	if (qib_wc_pat) {
		resource_size_t vl15off;
		/*
		 * We do not set WC on the VL15 buffers to avoid
		 * a rare problem with unaligned writes from
		 * interrupt-flushed store buffers, so we need
		 * to map those separately here. We can't solve
		 * this for the rarely used mtrr case.
		 */
		ret = init_chip_wc_pat(dd, 0);
		if (ret)
			goto bail;

		/* vl15 buffers start just after the 4k buffers */
		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
			dd->piobcnt4k * dd->align4k;
		dd->piovl15base = ioremap_nocache(vl15off,
						  NUM_VL15_BUFS * dd->align4k);
		if (!dd->piovl15base) {
			/* was a silent "success" return; report the failure */
			ret = -ENOMEM;
			goto bail;
		}
	}
	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;
	if (!dd->num_pports) {
		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
		goto bail; /* no error, so can still figure out why err */
	}

	write_7322_initregs(dd);
	ret = qib_create_ctxts(dd);
	init_7322_cntrnames(dd);

	updthresh = 8U; /* update threshold */

	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * Reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * If this was less than the update threshold, we could wait
	 * a long time for an update. Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}
	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;

	/*
	 * If we have 16 user contexts, we will have 7 sbufs
	 * per context, so reduce the update threshold to match. We
	 * want to update before we actually run out, at low pbufs/ctxt
	 * so give ourselves some margin.
	 */
	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
		updthresh = dd->pbufsctxt - 2;
	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
bail:
	if (!dd->ctxtcnt)
		dd->ctxtcnt = 1; /* for other initialization code */

	return ret;
}
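/*
 * Illustrative accounting for the buffer partitioning above (numbers
 * invented, not read from a chip): with piobcnt2k = 128, piobcnt4k = 32
 * and SDMA enabled, sdmabufcnt = 32, so lastbuf_for_pio starts at
 * 128 + 32 - 32 = 128 and is decremented to 127 (inclusive range).
 * With updthresh = 8, lastctxt_piobuf = 128 - 8 = 120 buffers are then
 * divided evenly among the user contexts to give pbufsctxt.
 */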
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;

	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
	if (pbc & PBC_7322_VL15_SEND) {
		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
		last = first;
	} else {
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		last = dd->cspec->lastbuf_for_pio;
	}
	return qib_getsendbuf_range(dd, pbufnum, first, last);
}
static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	qib_write_kreg_port(ppd, krp_psinterval, intv);
	qib_write_kreg_port(ppd, krp_psstart, start);
}

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
}
static struct sdma_set_state_action sdma_7322_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_drain = 0,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_drain = 0,
		.go_s99_running_totrue = 1,
	},
};

static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7322_action_table;
}
static int init_sdma_7322_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned lastbuf, erstbuf;
	u64 senddmabufmask[3] = { 0 };
	int n, ret = 0;

	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);

	if (dd->num_pports)
		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
	else
		n = dd->cspec->sdmabufcnt; /* failsafe for init */
	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
		((dd->num_pports == 1 || ppd->port == 2) ? n :
		 dd->cspec->sdmabufcnt);
	lastbuf = erstbuf + n;

	ppd->sdma_state.first_sendbuf = erstbuf;
	ppd->sdma_state.last_sendbuf = lastbuf;
	for (; erstbuf < lastbuf; ++erstbuf) {
		unsigned word = erstbuf / BITS_PER_LONG;
		unsigned bit = erstbuf & (BITS_PER_LONG - 1);

		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
	return ret;
}
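/*
 * Illustrative split of the SDMA buffer range above (made-up counts):
 * with piobcnt2k = 128, piobcnt4k = 32, sdmabufcnt = 32 and two ports,
 * n = 16, so port 1 claims buffers 128..143 (erstbuf = 160 - 32) and
 * port 2 claims 144..159 (erstbuf = 160 - 16); each port's range is
 * then marked, one bit per buffer, in the three 64-bit mask registers.
 */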
/* sdma_lock must be held */
static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
		(u16) qib_read_kreg_port(ppd, krp_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	if (swhead < swtail)
		/* not wrapped */
		sane = (hwhead >= swhead) & (hwhead <= swtail);
	else if (swhead > swtail)
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	else
		/* empty */
		sane = (hwhead == swhead);

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* proceed as if no progress */
		hwhead = swhead;
	}

	return hwhead;
}
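/*
 * Sanity-window illustration for the check above (descq_cnt = 256,
 * values invented): with swhead = 250 and swtail = 10 the ring has
 * wrapped, so a hardware head of 253 or 4 is sane, while 100 is not
 * and triggers the register re-read / "no progress" fallback.
 */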
static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
{
	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);

	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
}
/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * The delay affects the next packet and the amount of the delay is
 * based on the length of this packet.
 */
static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	u8 snd_mult = ppd->delay_mult;
	u8 rcv_mult = ib_rate_to_delay[srate];
	u32 ret;

	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;

	/* Indicate VL15, else set the VL in the control word */
	if (vl == 15)
		ret |= PBC_7322_VL15_SEND_CTRL;
	else
		ret |= vl << PBC_VL_NUM_LSB;

	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;

	return ret;
}
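/*
 * Worked example for the static-rate delay above (invented values):
 * if the QP's static rate is slower than the port (rcv_mult = 8,
 * snd_mult = 2) and plen = 64 dwords, the delay portion of the control
 * word is ((64 + 1) >> 1) * 2 = 64; if the QP rate is as fast or
 * faster, no delay is inserted and only the VL and port-select bits
 * are set.
 */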
/*
 * Enable the per-port VL15 send buffers for use.
 * They follow the rest of the buffers, without a config parameter.
 * This was in initregs, but that is done before the shadow
 * is set up, and this has to be done after the shadow is
 * set up.
 */
static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
{
	unsigned vl15bufs;

	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
			       TXCHK_CHG_TYPE_KERN, NULL);
}

static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
{
	if (rcd->ctxt < NUM_IB_PORTS) {
		if (rcd->dd->num_pports > 1) {
			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
		} else {
			rcd->rcvegrcnt = KCTXT0_EGRCNT;
			rcd->rcvegr_tid_base = 0;
		}
	} else {
		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
	}
}
#define QTXSLEEPS 5000
static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
	int i;
	const int last = start + len - 1;
	const int lastr = last / BITS_PER_LONG;
	u32 sleeps = 0;
	int wait = rcd != NULL;
	unsigned long flags;

	while (wait) {
		unsigned long shadow;
		int cstart, previ = -1;

		/*
		 * When flipping from kernel to user, we can't change
		 * the checking type if the buffer is allocated to the
		 * driver. It's OK the other direction, because it's
		 * from close, and we have just disarm'ed all the
		 * buffers. All the kernel to kernel changes are also
		 * OK.
		 */
		for (cstart = start; cstart <= last; cstart++) {
			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
				/ BITS_PER_LONG;
			if (i != previ) {
				shadow = (unsigned long)
					le64_to_cpu(dd->pioavailregs_dma[i]);
				previ = i;
			}
			if (test_bit(((2 * cstart) +
				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
				     % BITS_PER_LONG, &shadow))
				break;
		}

		if (cstart > last)
			break;

		if (sleeps == QTXSLEEPS)
			break;
		/* make sure we see an updated copy next time around */
		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		sleeps++;
		msleep(20);
	}

	switch (which) {
	case TXCHK_CHG_TYPE_DIS1:
		/*
		 * disable checking on a range; used by diags; just
		 * one buffer, but still written generically
		 */
		for (i = start; i <= last; i++)
			clear_bit(i, dd->cspec->sendchkenable);
		break;

	case TXCHK_CHG_TYPE_ENAB1:
		/*
		 * (re)enable checking on a range; used by diags; just
		 * one buffer, but still written generically; read
		 * scratch to be sure buffer actually triggered, not
		 * just flushed from processor.
		 */
		qib_read_kreg32(dd, kr_scratch);
		for (i = start; i <= last; i++)
			set_bit(i, dd->cspec->sendchkenable);
		break;

	case TXCHK_CHG_TYPE_KERN:
		/* usable by kernel */
		for (i = start; i <= last; i++) {
			set_bit(i, dd->cspec->sendibchk);
			clear_bit(i, dd->cspec->sendgrhchk);
		}
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		/* see if we need to raise avail update threshold */
		for (i = dd->first_user_ctxt;
		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
		     && i < dd->cfgctxts; i++)
			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
			   < dd->cspec->updthresh_dflt)
				break;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		if (i == dd->cfgctxts) {
			spin_lock_irqsave(&dd->sendctrl_lock, flags);
			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
					SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		}
		break;

	case TXCHK_CHG_TYPE_USER:
		/* for user process */
		for (i = start; i <= last; i++) {
			clear_bit(i, dd->cspec->sendibchk);
			set_bit(i, dd->cspec->sendgrhchk);
		}
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
			dd->cspec->updthresh = (rcd->piocnt /
						rcd->subctxt_cnt) - 1;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		} else
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		break;

	default:
		break;
	}

	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
		qib_write_kreg(dd, kr_sendcheckmask + i,
			       dd->cspec->sendchkenable[i]);

	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
			       dd->cspec->sendgrhchk[i]);
		qib_write_kreg(dd, kr_sendibpktmask + i,
			       dd->cspec->sendibchk[i]);
	}

	/*
	 * Be sure whatever we did was seen by the chip and acted upon,
	 * before we return. Mostly important for which >= 2.
	 */
	qib_read_kreg32(dd, kr_scratch);
}
/* useful for trigger analyzers, etc. */
static void writescratch(struct qib_devdata *dd, u32 val)
{
	qib_write_kreg(dd, kr_scratch, val);
}

/* Dummy for now, use chip regs soon */
static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	return -ENXIO;
}
/**
 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for this qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, inits, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct qib_devdata *dd;
	int ret, i;
	u32 tabsize, actual_cnt = 0;

	dd = qib_alloc_devdata(pdev,
		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
		sizeof(struct qib_chip_specific) +
		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
	if (IS_ERR(dd))
		goto bail;

	dd->f_bringup_serdes = qib_7322_bringup_serdes;
	dd->f_cleanup = qib_setup_7322_cleanup;
	dd->f_clear_tids = qib_7322_clear_tids;
	dd->f_free_irq = qib_7322_free_irq;
	dd->f_get_base_info = qib_7322_get_base_info;
	dd->f_get_msgheader = qib_7322_get_msgheader;
	dd->f_getsendbuf = qib_7322_getsendbuf;
	dd->f_gpio_mod = gpio_7322_mod;
	dd->f_eeprom_wen = qib_7322_eeprom_wen;
	dd->f_hdrqempty = qib_7322_hdrqempty;
	dd->f_ib_updown = qib_7322_ib_updown;
	dd->f_init_ctxt = qib_7322_init_ctxt;
	dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
	dd->f_intr_fallback = qib_7322_intr_fallback;
	dd->f_late_initreg = qib_late_7322_initreg;
	dd->f_setpbc_control = qib_7322_setpbc_control;
	dd->f_portcntr = qib_portcntr_7322;
	dd->f_put_tid = qib_7322_put_tid;
	dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
	dd->f_rcvctrl = rcvctrl_7322_mod;
	dd->f_read_cntrs = qib_read_7322cntrs;
	dd->f_read_portcntrs = qib_read_7322portcntrs;
	dd->f_reset = qib_do_7322_reset;
	dd->f_init_sdma_regs = init_sdma_7322_regs;
	dd->f_sdma_busy = qib_sdma_7322_busy;
	dd->f_sdma_gethead = qib_sdma_7322_gethead;
	dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
	dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
	dd->f_sendctrl = sendctrl_7322_mod;
	dd->f_set_armlaunch = qib_set_7322_armlaunch;
	dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
	dd->f_iblink_state = qib_7322_iblink_state;
	dd->f_ibphys_portstate = qib_7322_phys_portstate;
	dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
	dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
	dd->f_set_ib_loopback = qib_7322_set_loopback;
	dd->f_get_ib_table = qib_7322_get_ib_table;
	dd->f_set_ib_table = qib_7322_set_ib_table;
	dd->f_set_intr_state = qib_7322_set_intr_state;
	dd->f_setextled = qib_setup_7322_setextled;
	dd->f_txchk_change = qib_7322_txchk_change;
	dd->f_update_usrhead = qib_update_7322_usrhead;
	dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
	dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
	dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
	dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
	dd->f_sdma_init_early = qib_7322_sdma_init_early;
	dd->f_writescratch = writescratch;
	dd->f_tempsense_rd = qib_7322_tempsense_rd;
	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped, but chip registers
	 * are not set up until start of qib_init_7322_variables.
	 */
	ret = qib_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* initialize chip-specific variables */
	ret = qib_init_7322_variables(dd);
	if (ret)
		goto bail_cleanup;

	if (qib_mini_init || !dd->num_pports)
		goto bail;

	/*
	 * Determine number of vectors we want; depends on port count
	 * and number of configured kernel receive queues actually used.
	 * Should also depend on whether sdma is enabled or not, but
	 * that's such a rare testing case it's not worth worrying about.
	 */
	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
	for (i = 0; i < tabsize; i++)
		if ((i < ARRAY_SIZE(irq_table) &&
		     irq_table[i].port <= dd->num_pports) ||
		    (i >= ARRAY_SIZE(irq_table) &&
		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
			actual_cnt++;
	/* reduce by ctxt's < 2 */
	if (qib_krcvq01_no_msi)
		actual_cnt -= dd->num_pports;

	tabsize = actual_cnt;
	dd->cspec->msix_entries = kmalloc(tabsize *
			sizeof(struct msix_entry), GFP_KERNEL);
	dd->cspec->msix_arg = kmalloc(tabsize *
			sizeof(void *), GFP_KERNEL);
	if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
		qib_dev_err(dd, "No memory for MSIx table\n");
		tabsize = 0;
	}
	for (i = 0; i < tabsize; i++)
		dd->cspec->msix_entries[i].entry = i;

	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
			    "continuing anyway\n");
	/* may be less than we wanted, if not enough available */
	dd->cspec->num_msix_entries = tabsize;

	/* setup interrupt handler */
	qib_setup_7322_interrupt(dd, 1);

	/* clear diagctrl register, in case diags were running and crashed */
	qib_write_kreg(dd, kr_hwdiagctrl, 0);

	goto bail;

bail_cleanup:
	qib_pcie_ddcleanup(dd);
bail_free:
	qib_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
/*
 * Set the table entry at the specified index from the table specified.
 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
 * 'idx' below addresses the correct entry, while its 4 LSBs select the
 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
 */
#define DDS_ENT_AMP_LSB 14
#define DDS_ENT_MAIN_LSB 9
#define DDS_ENT_POST_LSB 5
#define DDS_ENT_PRE_XTRA_LSB 3
#define DDS_ENT_PRE_LSB 0

/*
 * Set one entry in the TxDDS table for spec'd port
 * ridx picks one of the entries, while tp points
 * to the appropriate table entry.
 */
static void set_txdds(struct qib_pportdata *ppd, int ridx,
		      const struct txdds_ent *tp)
{
	struct qib_devdata *dd = ppd->dd;
	u32 pack_ent;
	int regidx;

	/* Get correct offset in chip-space, and in source table */
	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
	/*
	 * We do not use qib_write_kreg_port() because it was intended
	 * only for registers in the lower "port specific" pages.
	 * So do index calculation by hand.
	 */
	if (ppd->hw_pidx)
		regidx += (dd->palign / sizeof(u64));

	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
	pack_ent |= tp->post << DDS_ENT_POST_LSB;
	qib_write_kreg(dd, regidx, pack_ent);
	/* Prevent back-to-back writes by hitting scratch */
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}
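/*
 * Bit-layout sketch for pack_ent above, from the DDS_ENT_*_LSB values:
 *
 *	[17:14] amp   [13:9] main   [8:5] post   [4:3] pre_xtra   [2:0] pre
 *
 * e.g. the Loopback entry below, { 2, 2, 15, 6 } (amp, pre, main, post),
 * packs to (2 << 14) | (15 << 9) | (6 << 5) | 2 = 0x9EC2.
 */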

static const struct vendor_txdds_ent vendor_txdds[] = {
	{ /* Amphenol 1m 30awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470002       ",
		{ 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
	},
	{ /* Amphenol 3m 28awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470004       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
	},
	{ /* Finisar 3m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
		{ 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
	},
	{ /* Finisar 30m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
		{ 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
	},
	{ /* Finisar Default OM2 Optical */
		{ 0x00, 0x90, 0x65 }, NULL,
		{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
	},
	{ /* Gore 1m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
	},
	{ /* Gore 2m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
	},
	{ /* Gore 1m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
	},
	{ /* Gore 3m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
		{ 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
	},
	{ /* Gore 5m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
		{ 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
	},
	{ /* Gore 7m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
		{ 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
	},
	{ /* Gore 5m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
	},
	{ /* Gore 7m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
		{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
	},
	{ /* Intersil 12m 24awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
		{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
	},
	{ /* Intersil 10m 28awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
	},
	{ /* Intersil 7m 30awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
	},
	{ /* Intersil 5m 32awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
	},
	{ /* Intersil Default Active */
		{ 0x00, 0x30, 0xB4 }, NULL,
		{ 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
	},
	{ /* Luxtera 20m Active Optical */
		{ 0x00, 0x25, 0x63 }, NULL,
		{ 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
	},
	{ /* Molex 1M Cu loopback */
		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
		{ 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
	},
	{ /* Molex 2m 28awg NoEq */
		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
		{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
	},
};
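
/*
 * Matching note (see find_best_ent() below): an entry matches on the
 * 3-byte OUI, plus the part number when one is given; a NULL partnum
 * acts as that vendor's default, and must follow the more specific
 * part numbers for the same OUI, since the first match wins.
 */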

static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 0, 0, 1 },		/*  2 dB */
	{ 0, 0, 0, 2 },		/*  3 dB */
	{ 0, 0, 0, 3 },		/*  4 dB */
	{ 0, 0, 0, 4 },		/*  5 dB */
	{ 0, 0, 0, 5 },		/*  6 dB */
	{ 0, 0, 0, 6 },		/*  7 dB */
	{ 0, 0, 0, 7 },		/*  8 dB */
	{ 0, 0, 0, 8 },		/*  9 dB */
	{ 0, 0, 0, 9 },		/* 10 dB */
	{ 0, 0, 0, 10 },	/* 11 dB */
	{ 0, 0, 0, 11 },	/* 12 dB */
	{ 0, 0, 0, 12 },	/* 13 dB */
	{ 0, 0, 0, 13 },	/* 14 dB */
	{ 0, 0, 0, 14 },	/* 15 dB */
	{ 0, 0, 0, 15 },	/* 16 dB */
};

static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 0, 0, 8 },		/*  2 dB */
	{ 0, 0, 0, 8 },		/*  3 dB */
	{ 0, 0, 0, 9 },		/*  4 dB */
	{ 0, 0, 0, 9 },		/*  5 dB */
	{ 0, 0, 0, 10 },	/*  6 dB */
	{ 0, 0, 0, 10 },	/*  7 dB */
	{ 0, 0, 0, 11 },	/*  8 dB */
	{ 0, 0, 0, 11 },	/*  9 dB */
	{ 0, 0, 0, 12 },	/* 10 dB */
	{ 0, 0, 0, 12 },	/* 11 dB */
	{ 0, 0, 0, 13 },	/* 12 dB */
	{ 0, 0, 0, 13 },	/* 13 dB */
	{ 0, 0, 0, 14 },	/* 14 dB */
	{ 0, 0, 0, 14 },	/* 15 dB */
	{ 0, 0, 0, 15 },	/* 16 dB */
};

static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{ 2, 2, 15, 6 },	/* Loopback */
	{ 0, 1, 0, 7 },		/*  2 dB (also QMH7342) */
	{ 0, 1, 0, 9 },		/*  3 dB (also QMH7342) */
	{ 0, 1, 0, 11 },	/*  4 dB */
	{ 0, 1, 0, 13 },	/*  5 dB */
	{ 0, 1, 0, 15 },	/*  6 dB */
	{ 0, 1, 3, 15 },	/*  7 dB */
	{ 0, 1, 7, 15 },	/*  8 dB */
	{ 0, 1, 7, 15 },	/*  9 dB */
	{ 0, 1, 8, 15 },	/* 10 dB */
	{ 0, 1, 9, 15 },	/* 11 dB */
	{ 0, 1, 10, 15 },	/* 12 dB */
	{ 0, 2, 6, 15 },	/* 13 dB */
	{ 0, 2, 7, 15 },	/* 14 dB */
	{ 0, 2, 8, 15 },	/* 15 dB */
	{ 0, 2, 9, 15 },	/* 16 dB */
};

/*
 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
 * These are mostly used for mez cards going through connectors
 * and backplane traces, but can be used to add other "unusual"
 * table values as well.
 */
static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 1 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 1 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 2 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 2 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 3 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 4 },		/* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 7 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 7 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
	{ 0, 0, 0, 9 },		/* QMH7342 backplane settings */
	{ 0, 0, 0, 10 },	/* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{ 0, 1, 0, 4 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 5 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 6 },		/* QMH7342 backplane settings */
	{ 0, 1, 0, 8 },		/* QMH7342 backplane settings */
	{ 0, 1, 12, 10 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 11 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 12 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 14 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 6 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 7 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 8 },	/* QME7342 backplane setting */
	{ 0, 1, 0, 10 },	/* QMH7342 backplane settings */
	{ 0, 1, 0, 12 },	/* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 0 },		/* QME7342 mfg settings */
	{ 0, 0, 0, 6 },		/* QME7342 P2 mfg settings */
};

static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
					       unsigned atten)
{
	/*
	 * The attenuation table starts at 2dB for entry 1,
	 * with entry 0 being the loopback entry.
	 */
	if (atten <= 2)
		atten = 1;
	else if (atten > TXDDS_TABLE_SZ)
		atten = TXDDS_TABLE_SZ - 1;
	else
		atten--;
	return txdds + atten;
}
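
/*
 * Worked example: a cable reporting 5 dB of attenuation yields
 * txdds + 4, the entry labeled "5 dB" in the tables above; entry 1
 * is 2 dB, so N dB maps to index N - 1, 0-2 dB clamps to entry 1,
 * and anything past the end of the table clamps to the last entry.
 */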

/*
 * if override is set, the module parameter txselect has a value
 * for this specific port, so use it, rather than our normal mechanism.
 */
static void find_best_ent(struct qib_pportdata *ppd,
			  const struct txdds_ent **sdr_dds,
			  const struct txdds_ent **ddr_dds,
			  const struct txdds_ent **qdr_dds, int override)
{
	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
	int idx;

	/* Search table of known cables */
	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
		const struct vendor_txdds_ent *v = vendor_txdds + idx;

		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
		    (!v->partnum ||
		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
			*sdr_dds = &v->sdr;
			*ddr_dds = &v->ddr;
			*qdr_dds = &v->qdr;
			return;
		}
	}

	/* Look up serdes setting by cable type and attenuation */
	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
		return;
	}

	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
						      qd->atten[1])) {
		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
		return;
	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
		/*
		 * If we have no (or incomplete) data from the cable
		 * EEPROM, or no QSFP, or override is set, use the
		 * module parameter value to index into the attenuation
		 * table.
		 */
		idx = ppd->cpspec->no_eep;
		*sdr_dds = &txdds_sdr[idx];
		*ddr_dds = &txdds_ddr[idx];
		*qdr_dds = &txdds_qdr[idx];
	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
		/* similar to above, but index into the "extra" table. */
		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
		*sdr_dds = &txdds_extra_sdr[idx];
		*ddr_dds = &txdds_extra_ddr[idx];
		*qdr_dds = &txdds_extra_qdr[idx];
	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
					  TXDDS_MFG_SZ)) {
		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
		printk(KERN_INFO QIB_DRV_NAME
		       " IB%u:%u use idx %u into txdds_mfg\n",
		       ppd->dd->unit, ppd->port, idx);
		*sdr_dds = &txdds_extra_mfg[idx];
		*ddr_dds = &txdds_extra_mfg[idx];
		*qdr_dds = &txdds_extra_mfg[idx];
	} else {
		/* this shouldn't happen, it's range checked */
		*sdr_dds = txdds_sdr + qib_long_atten;
		*ddr_dds = txdds_ddr + qib_long_atten;
		*qdr_dds = txdds_qdr + qib_long_atten;
	}
}
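
/*
 * Selection order implemented above: a known cable in vendor_txdds[]
 * wins first; otherwise active cables use the board attenuation, and
 * cables with attenuation data in their EEPROM go through
 * get_atten_table().  Failing those (or whenever override is set),
 * the txselect module parameter value (no_eep) indexes the main,
 * "extra", or (QME/QMH only) mfg tables; the final else is just a
 * range-checked fallback.
 */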

static void init_txdds_table(struct qib_pportdata *ppd, int override)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;
	int idx;
	int single_ent = 0;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);

	/* for mez cards or override, use the selected value for all entries */
	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
		single_ent = 1;

	/* Fill in the first entry with the best entry found. */
	set_txdds(ppd, 0, sdr_dds);
	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
			   QIBL_LINKACTIVE)) {
		dds = (struct txdds_ent *)(ppd->link_speed_active ==
					   QIB_IB_QDR ? qdr_dds :
					   (ppd->link_speed_active ==
					    QIB_IB_DDR ? ddr_dds : sdr_dds));
		write_tx_serdes_param(ppd, dds);
	}

	/* Fill in the remaining entries with the default table values. */
	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
			  single_ent ? ddr_dds : txdds_ddr + idx);
		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
			  single_ent ? qdr_dds : txdds_qdr + idx);
	}
}

#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
#define AHB_TRANS_TRIES 10

/*
 * The chan argument to ahb_mod() addresses the AHB slots in the order
 * 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3, 5=subsystem, which is why
 * most calls use "chan + (chan >> 1)" to map serdes channels 0-3 past
 * the pll slot.
 */
static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
		   u32 data, u32 mask)
{
	u32 rd_data, wr_data, sz_mask;
	u64 trans, acc, prev_acc;
	u32 ret = 0xBAD0BAD;
	int tries;

	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
	/* From this point on, make sure we return access */
	acc = (quad << 1) | 1;
	qib_write_kreg(dd, KR_AHB_ACC, acc);

	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		if (trans & AHB_TRANS_RDY)
			break;
	}
	if (tries >= AHB_TRANS_TRIES) {
		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
		goto bail;
	}

	/* If mask is not all 1s, we need to read, but different SerDes
	 * entities have different sizes
	 */
	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
	wr_data = data & mask & sz_mask;
	if ((~mask & sz_mask) != 0) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
		/* Re-read in case host split reads and read data first */
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
		wr_data |= (rd_data & ~mask & sz_mask);
	}

	/* If mask is not zero, we need to write. */
	if (mask & sz_mask) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
		trans |= AHB_WR;
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
	}
	ret = wr_data;
bail:
	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
	return ret;
}
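
/*
 * ahb_mod() is thus a read-modify-write primitive: per the mask
 * handling above, mask == 0 is a pure read (the current value is
 * returned), an all-ones mask is a pure write, and anything in
 * between merges 'data' into the masked bits.  The reads elsewhere
 * in this file are ahb_mod(dd, quad, chan, addr, 0, 0).
 */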

static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
			     unsigned mask)
{
	struct qib_devdata *dd = ppd->dd;
	int chan;
	u32 rbc;

	for (chan = 0; chan < SERDES_CHANS; ++chan) {
		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
			data, mask);
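		/*
		 * Read back the same register (result unused); presumably
		 * this flushes/verifies the write before moving on to the
		 * next channel.
		 */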
		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			      addr, 0, 0);
	}
}

static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);

	if (enable && !state) {
		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
		       ppd->dd->unit, ppd->port);
		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	} else if (!enable && state) {
		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
		       ppd->dd->unit, ppd->port);
		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	}
	qib_write_kreg_port(ppd, krp_serdesctrl, data);
}

static int serdes_7322_init(struct qib_pportdata *ppd)
{
	int ret = 0;

	if (ppd->dd->cspec->r1)
		ret = serdes_7322_init_old(ppd);
	else
		ret = serdes_7322_init_new(ppd);
	return ret;
}
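
/*
 * Going by the flag name, cspec->r1 presumably marks rev-1 silicon;
 * it keeps the original bringup sequence, while later revisions use
 * the LSI-suggested sequence in serdes_7322_init_new() below.
 */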

static int serdes_7322_init_old(struct qib_pportdata *ppd)
{
	u32 le_val;

	/*
	 * Initialize the Tx DDS tables.  Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
			    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				     reset_tx_deemphasis_override));

	/* Patch some SerDes defaults to "Better for IB" */
	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));

	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));

	/* May be overridden in qsfp_7322_event */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));

	/* enable LE1 adaptation for all but QME, which is disabled */
	le_val = IS_QME(ppd->dd) ? 0 : 1;
	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data: SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	serdes_7322_los_enable(ppd, 1);

	/* rxbistena; set to 0 to avoid effects of it switching later */
	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);

	/* Configure 4 DFE taps, and only they adapt */
	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));

	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);

	/*
	 * Set receive adaptation mode.  SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;

	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
	/* rx offset center enabled */
	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);

	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
	}

	/* Set the frequency loop bandwidth to 15 */
	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));

	return 0;
}

static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
	u64 tstart;
	u32 le_val, rxcaldone;
	int chan, chan_done = (1 << SERDES_CHANS) - 1;

	/*
	 * Initialize the Tx DDS tables.  Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
			    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				     reset_tx_deemphasis_override));

	/* START OF LSI SUGGESTED SERDES BRINGUP */
	/* Reset - Calibration Setup */
	/* Stop DFE adaptation */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
	/* Disable LE1 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
	/* Disable autoadapt for LE1 */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
	/* Disable LE2 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
	/* Disable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	/* Disable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
	/* Disable Timing Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
	/* Disable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
	/* Disable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
	/* Disable RX Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	/* Disable RX Offset Calibration */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
	/* Select BB CDR */
	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
	/* CDR Step Size */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
	/* Enable phase Calibration */
	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
	/* DFE Bandwidth [2:14-12] */
	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
	/* DFE Config (4 taps only) */
	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
	/* Gain Loop Bandwidth */
	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
	} else {
		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
	}
	/* Baseline Wander Correction Gain [13:4-0] (leave as default) */
	/* Baseline Wander Correction Gain [3:7-5] (leave as default) */
	/* Data Rate Select [5:7-6] (leave as default) */
	/* RX Parallel Word Width [3:10-8] (leave as default) */

	/* RX RESET */
	/* Single- or Multi-channel reset */
	/* RX Analog reset */
	/* RX Digital reset */
	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
	msleep(20);
	/* RX Analog reset */
	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
	msleep(20);
	/* RX Digital reset */
	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
	msleep(20);

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data: SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	/* Turn on LOS on initial SERDES init */
	serdes_7322_los_enable(ppd, 1);
	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);

	/* RX LATCH CALIBRATION */
	/* Enable Eyefinder Phase Calibration latch */
	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
	/* Enable RX Offset Calibration latch */
	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
	msleep(20);
	/* Start Calibration */
	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
	tstart = get_jiffies_64();
	while (chan_done &&
	       !time_after64(get_jiffies_64(),
			     tstart + msecs_to_jiffies(500))) {
		msleep(20);
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
			    (~chan_done & (1 << chan)) == 0)
				chan_done &= ~(1 << chan);
		}
	}
	if (chan_done) {
		printk(KERN_INFO QIB_DRV_NAME
		       " Serdes %d calibration not done after .5 sec: 0x%x\n",
		       IBSD(ppd->hw_pidx), chan_done);
	} else {
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
				printk(KERN_INFO QIB_DRV_NAME
				       " Serdes %d chan %d calibration "
				       "failed\n", IBSD(ppd->hw_pidx), chan);
		}
	}

	/* Turn off Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	msleep(20);

	/* BRING RX UP */
	/* Set LE2 value (May be overridden in qsfp_7322_event) */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
	/* Set LE2 Loop bandwidth */
	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
	/* Enable LE2 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
	msleep(20);
	/* Enable H0 only */
	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
	/* Enable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	msleep(20);
	/* Set Frequency Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
	/* Enable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
	/* Set Timing Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
	/* Enable Timing Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
	msleep(50);
	/*
	 * Enable DFE:
	 * Set receive adaptation mode.  SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;
	/* Disable LE1 */
	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
	/* Disable auto adapt for LE1 */
	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
	msleep(20);
	/* Enable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
	/* Enable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* VGA output common mode */
	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));

	return 0;
}

/* start adjust QMH serdes parameters */

static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
{
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		9, code << 9, 0x3f << 9);
}

static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
			    int enable, u32 tapenable)
{
	if (enable)
		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			1, 3 << 10, 0x1f << 10);
	else
		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
			1, 0, 0x1f << 10);
}

/* Set clock to 1, 0, 1, 0 */
static void clock_man(struct qib_pportdata *ppd, int chan)
{
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0x4000, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0x4000, 0x4000);
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
		4, 0, 0x4000);
}

/*
 * Write the current Tx serdes pre, post, main, amp settings into the serdes.
 * The caller must pass the settings appropriate for the current speed,
 * or not care if they are correct for the current speed.
 */
static void write_tx_serdes_param(struct qib_pportdata *ppd,
				  struct txdds_ent *txdds)
{
	u64 deemph;

	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
	/* field names for amp, main, post, pre, respectively */
	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));

	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
			   tx_override_deemphasis_select);
	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					      txampcntl_d2a);
	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					 txc0_ena);
	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					  txcp1_ena);
	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		   txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
					  txcn1_ena);
	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
}

/*
 * Set the parameters for mez cards on link bounce, so they are
 * always exactly what was requested.  Similar logic to init_txdds
 * but does just the serdes.
 */
static void adj_tx_serdes(struct qib_pportdata *ppd)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
				   qdr_dds : (ppd->link_speed_active ==
					      QIB_IB_DDR ? ddr_dds : sdr_dds));
	write_tx_serdes_param(ppd, dds);
}

/* set QDR forced value for H1, if needed */
static void force_h1(struct qib_pportdata *ppd)
{
	int chan;

	ppd->cpspec->qdr_reforce = 0;
	if (!ppd->dd->cspec->r1)
		return;

	for (chan = 0; chan < SERDES_CHANS; chan++) {
		set_man_mode_h1(ppd, chan, 1, 0);
		set_man_code(ppd, chan, ppd->cpspec->h1_val);
		clock_man(ppd, chan);
		set_man_mode_h1(ppd, chan, 0, 0);
	}
}

#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)

#define R_OPCODE_LSB 3
#define R_OP_NOP 0
#define R_OP_SHIFT 2
#define R_OP_UPDATE 3
#define R_TDI_LSB 2
#define R_TDO_LSB 1
#define R_RDY 1
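
/*
 * Layout of kr_r_access implied by the defines above: bit 0 is the
 * ready bit (R_RDY), bit 1 is TDO (data out), bit 2 is TDI (data in),
 * bits [4:3] hold the opcode (NOP/SHIFT/UPDATE), bist_en selects the
 * target scan chain, and SJA_EN gates the whole JTAG-style access
 * mechanism.
 */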

static int qib_r_grab(struct qib_devdata *dd)
{
	u64 val;

	val = SJA_EN;
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	return 0;
}

/* qib_r_wait_for_rdy() not only waits for the ready bit, it
 * returns the current state of R_TDO
 */
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
	u64 val;
	int timeout;

	for (timeout = 0; timeout < 100 ; ++timeout) {
		val = qib_read_kreg32(dd, kr_r_access);
		if (val & R_RDY)
			return (val >> R_TDO_LSB) & 1;
	}
	return -1;
}
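
/*
 * Bit-bang a 'len'-bit scan through the selected chain: each pass
 * shifts in one bit from inp (if non-NULL) via TDI, while the TDO
 * state sampled by the preceding ready-wait is recorded into outp
 * (if non-NULL), i.e. output bits are captured just before the
 * corresponding input bit is clocked in.  Returns the number of bits
 * shifted, or negative if the ready bit never comes back.
 */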
static int qib_r_shift(struct qib_devdata *dd, int bisten,
		       int len, u8 *inp, u8 *outp)
{
	u64 valbase, val;
	int ret, pos;

	valbase = SJA_EN | (bisten << BISTEN_LSB) |
		(R_OP_SHIFT << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret < 0)
		goto bail;
	for (pos = 0; pos < len; ++pos) {
		val = valbase;
		if (outp) {
			outp[pos >> 3] &= ~(1 << (pos & 7));
			outp[pos >> 3] |= (ret << (pos & 7));
		}
		if (inp) {
			int tdi = inp[pos >> 3] >> (pos & 7);

			val |= ((tdi & 1) << R_TDI_LSB);
		}
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
		ret = qib_r_wait_for_rdy(dd);
		if (ret < 0)
			break;
	}
	/* Restore to NOP between operations. */
	val = SJA_EN | (bisten << BISTEN_LSB);
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	ret = qib_r_wait_for_rdy(dd);

	if (ret >= 0)
		ret = pos;
bail:
	return ret;
}

static int qib_r_update(struct qib_devdata *dd, int bisten)
{
	u64 val;
	int ret;

	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret >= 0) {
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	return ret;
}

#define BISTEN_PORT_SEL 15
#define LEN_PORT_SEL 625
#define BISTEN_AT 17
#define LEN_AT 156
#define BISTEN_ETM 16
#define LEN_ETM 632

#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
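
/* e.g. BIT2BYTE(LEN_AT) = (156 + 7) / 8 = 20, the size of the AT arrays below */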

/* these are common for all IB port use cases. */
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
};

static u8 at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

/* used for IB1 or IB2, only one in use */
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
};

/* used when both IB1 and IB2 are in use */
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
};

/* used when only IB1 is in use */
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* used when only IB2 is in use */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};

/* used when both IB1 and IB2 are in use */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Do setup to properly handle IB link recovery; if 'both' is set, we
 * are initializing to cover both ports; otherwise we are initializing
 * to cover a single port card, or the port has reached INIT and we may
 * need to switch coverage types.
 */
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
{
	u8 *portsel, *etm;
	struct qib_devdata *dd = ppd->dd;

	if (!ppd->dd->cspec->r1)
		return;
	if (!both) {
		dd->cspec->recovery_ports_initted++;
		ppd->cpspec->recovery_init = 1;
	}
	if (!both && dd->cspec->recovery_ports_initted == 1) {
		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
		etm = atetm_1port;
	} else {
		portsel = portsel_2port;
		etm = atetm_2port;
	}

	if (qib_r_grab(dd) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
			portsel, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0)
		qib_dev_err(dd, "Failed IB link recovery setup\n");
}
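
/*
 * The || chain above first clears the ETM and AT chains with their
 * reset patterns, then loads the port-select pattern, and only then
 * loads the live AT and ETM contents; it short-circuits on the first
 * failure, so a single error message covers the whole sequence.
 */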

static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 fmask;

	if (dd->cspec->recovery_ports_initted != 1)
		return; /* rest doesn't apply to dualport */
	qib_write_kreg(dd, kr_control, dd->control |
		       SYM_MASK(Control, FreezeMode));
	(void)qib_read_kreg64(dd, kr_scratch);
	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask) {
		/*
		 * require a powercycle before we'll work again, and make
		 * sure we get no more interrupts, and don't turn off
		 * freeze.
		 */
		ppd->dd->cspec->stay_in_freeze = 1;
		qib_7322_set_intr_state(ppd->dd, 0);
		qib_write_kreg(dd, kr_fmask, 0ULL);
		qib_dev_err(dd, "HCA unusable until powercycled\n");
		return; /* eventually reset */
	}

	qib_write_kreg(ppd->dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

	/* don't do the full clear_freeze(), not needed for this */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);
	/* take IBC out of reset */
	if (ppd->link_speed_supported) {
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_read_kreg32(dd, kr_scratch);
		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
					       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	}
}
  7176. }