intel_display.c

/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
#include <linux/dma_remapping.h>

#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

typedef struct {
	int min, max;
} intel_range_t;

typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM	2

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
	bool (*find_pll)(const intel_limit_t *, struct drm_crtc *,
			 int, int, intel_clock_t *, intel_clock_t *);
};
/* FDI */
#define IRONLAKE_FDI_FREQ	2700000 /* in kHz for mode->clock */

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}
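
/*
 * Example: the fixed return of 27 means 2.7 GHz in the 100 MHz units noted
 * in the declaration above, which matches IRONLAKE_FDI_FREQ (2700000 kHz).
 */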
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
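/*
 * For example, the .m1 range of 12..22 in the tables below describes actual
 * m1 dividers of 14..24, and an n register value of 1 stands for an actual
 * divider of 3 (see intel_clock() further down).
 */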
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* fall back for all other output types */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}
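
/*
 * Worked example with illustrative (not hardware-derived) values: for
 * refclk = 96000 kHz, m2 = 104 (so m = 106), n = 3, p1 = 8 and p2 = 5,
 * vco = 96000 * 106 / 3 = 3392000 kHz and dot = 3392000 / 40 = 84800 kHz,
 * both within the Pineview table ranges above.
 */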

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
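
/*
 * Worked example for the non-Pineview path, again with illustrative values:
 * m1 = 12 and m2 = 5 give m = 5 * 14 + 7 = 77; with refclk = 96000 kHz and
 * n = 1, vco = 96000 * 77 / 3 = 2464000 kHz, and p1 = 2, p2 = 10 yield
 * dot = 2464000 / 20 = 123200 kHz.
 */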

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}

#define INTELPllInvalid(s)	do { /* DRM_DEBUG(s); */ return false; } while (0)
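/*
 * Note: the macro above returns false from the *calling* function; the
 * do-while (0) wrapper makes it usable as a single statement, and the
 * DRM_DEBUG call is left commented out, presumably to keep the tight PLL
 * search loops below quiet.
 */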

/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
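
/*
 * intel_find_best_PLL() below performs an exhaustive search: it walks every
 * m1/m2/n/p1 combination permitted by the limit table and keeps the valid
 * candidate whose dot clock lands closest to the target.  The error bound
 * starts at the target itself, so the (err != target) result is true only
 * once some candidate has beaten that initial bound.
 */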
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
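	/*
	 * e.g. (hypothetical numbers): a 270000 kHz target gives
	 * err_most = 1054 + 527 = 1581, i.e. ~0.59% of the target.
	 */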
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;

	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
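
/*
 * Sanity check on the fixed dividers above, using the formulas open-coded
 * in intel_find_pll_g4x_dp() itself (96000 being the refclk value it uses,
 * presumably in kHz):
 *
 *	target < 200000: m = 5 * (23 + 2) + (8 + 2) = 135, p = 20
 *	                 dot = 96000 * 135 / (2 + 2) / 20 = 162000
 *	otherwise:       m = 5 * (14 + 2) + (2 + 2) = 84,  p = 10
 *	                 dot = 96000 * 84 / (1 + 2) / 10  = 268800
 *
 * i.e. the two DisplayPort link rates, 162 MHz and (nominally) 270 MHz.
 */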

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);
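		/*
		 * PCH_DPLL_SEL appears to pack one 4-bit field per
		 * transcoder (judging by the masks used here): bit 3 of
		 * the field is the enable bit and bit 0 selects the PLL.
		 */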
		/* Make sure the selected PLL is enabled to the transcoder */
		WARN(!((pch_dpll >> (4 * pipe)) & 8),
		     "transcoder %d PLL not enabled\n", pipe);

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev))
		return;

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}

/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	int reg;
	u32 val;

	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}

static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;

	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}

static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
	}
	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}

static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}

/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, val & ~DP_PORT_EN);
	}
}

static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
			     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
			      reg, pipe);
		I915_WRITE(reg, val & ~PORT_ENABLE);
	}
}

/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 reg, val;

	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (adpa_pipe_enabled(dev_priv, pipe, val))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		POSTING_READ(reg);
		udelay(100);
	}

	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}

static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
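	/* e.g. (hypothetical): a 2048-byte pitch encodes as 2048 / 64 - 1 = 31 */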
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}

static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 5)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}

int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
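
/*
 * Typical usage mirrors intel_pipe_set_base() below: call this under
 * struct_mutex, scan out from the pinned object, and balance with
 * i915_gem_object_unpin() once the display no longer references it
 * (or on error).
 */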

static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
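	/*
	 * e.g. (hypothetical numbers): at 32bpp with a 5120-byte pitch,
	 * (x, y) = (16, 100) gives Offset = 100 * 5120 + 16 * 4 = 512064.
	 */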

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return 0;
}

static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth != 16)
			return -EINVAL;

		dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else
			return -EINVAL;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = dev_priv->display.update_plane(crtc, fb, x, y);
	if (ret)
		return ret;

	intel_update_fbc(dev);
	intel_increase_pllclock(crtc);

	return 0;
}

static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	case 2:
		if (IS_IVYBRIDGE(dev))
			break;
		/* fall through otherwise */
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	if (old_fb) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;

		wait_event(dev_priv->pending_flip_queue,
			   atomic_read(&dev_priv->mm.wedged) ||
			   atomic_read(&obj->pending_flip) == 0);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		ret = i915_gem_object_finish_gpu(obj);
		(void) ret;
	}

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	if (old_fb) {
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}

static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160MHz:
		 *   1) program 0x4600c bits 15:0 = 0x8124
		 *   2) program 0x46010 bit 0 = 1
		 *   3) program 0x46034 bit 24 = 1
		 *   4) program 0x64000 bit 14 = 1
		 */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
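		/*
		 * FDI_LINK_TRAIN_NONE_IVB presumably spans the whole
		 * training-pattern field here, so the clear-then-set
		 * below selects the "no pattern" encoding rather than
		 * being a no-op.
		 */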
  2057. temp &= ~FDI_LINK_TRAIN_NONE_IVB;
  2058. temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
  2059. } else {
  2060. temp &= ~FDI_LINK_TRAIN_NONE;
  2061. temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
  2062. }
  2063. I915_WRITE(reg, temp);
  2064. reg = FDI_RX_CTL(pipe);
  2065. temp = I915_READ(reg);
  2066. if (HAS_PCH_CPT(dev)) {
  2067. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  2068. temp |= FDI_LINK_TRAIN_NORMAL_CPT;
  2069. } else {
  2070. temp &= ~FDI_LINK_TRAIN_NONE;
  2071. temp |= FDI_LINK_TRAIN_NONE;
  2072. }
  2073. I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
  2074. /* wait one idle pattern time */
  2075. POSTING_READ(reg);
  2076. udelay(1000);
  2077. /* IVB wants error correction enabled */
  2078. if (IS_IVYBRIDGE(dev))
  2079. I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
  2080. FDI_FE_ERRC_ENABLE);
  2081. }
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags |= FDI_PHASE_SYNC_OVR(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
	flags |= FDI_PHASE_SYNC_EN(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
	POSTING_READ(SOUTH_CHICKEN1);
}
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}
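/*
 * The four voltage-swing / pre-emphasis combinations the SNB-B training
 * loops below step through, in order, until the link reports bit or
 * symbol lock: 400mV/0dB, 400mV/6dB, 600mV/3.5dB, 800mV/0dB.
 */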
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
	POSTING_READ(SOUTH_CHICKEN1);
}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe),
			   I915_READ(FDI_RX_CHICKEN(pipe)) &
			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
	} else if (HAS_PCH_CPT(dev)) {
		cpt_phase_pointer_disable(dev, pipe);
	}

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
/*
 * When we disable a pipe, we need to clear any pending scanline wait events
 * to avoid hanging the ring, which we assume we are waiting on.
 */
static void intel_clear_scanline_wait(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	u32 tmp;

	if (IS_GEN2(dev))
		/* Can't break the hang on i8xx */
		return;

	ring = LP_RING(dev_priv);
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT)
		I915_WRITE_CTL(ring, tmp);
}
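/*
 * Wait until no page flip against the crtc's current scanout buffer
 * remains queued, i.e. until the object's pending_flip count drops to
 * zero.
 */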
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv;

	if (crtc->fb == NULL)
		return;

	obj = to_intel_framebuffer(crtc->fb)->obj;
	dev_priv = crtc->dev->dev_private;
	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&obj->pending_flip) == 0);
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/*
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
	 * must be driven by its own crtc; no sharing is possible.
	 */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_EDP:
			if (!intel_encoder_is_pch_edp(&encoder->base))
				return false;
			continue;
		}
	}

	return true;
}
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, transc_sel;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
			TRANSC_DPLLB_SEL;

		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0) {
			temp &= ~(TRANSA_DPLLB_SEL);
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		} else if (pipe == 1) {
			temp &= ~(TRANSB_DPLLB_SEL);
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		} else if (pipe == 2) {
			temp &= ~(TRANSC_DPLLB_SEL);
			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
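/*
 * After a mode set on CPT, verify that the pipe's scanline counter is
 * actually advancing; if it appears stuck, pulse the auto-train
 * generator stall-disable chicken bit once and re-check before giving
 * up (the workaround performed below).
 */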
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Without this, mode sets may fail silently on FDI */
		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
		udelay(250);
		I915_WRITE(tc2reg, 0);
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
	}
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 temp;
	bool is_pch_port;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		temp = I915_READ(PCH_LVDS);
		if ((temp & LVDS_PORT_EN) == 0)
			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
	}

	is_pch_port = intel_crtc_driving_pch(crtc);

	if (is_pch_port)
		ironlake_fdi_pll_enable(crtc);
	else
		ironlake_fdi_disable(crtc);

	/* Enable panel fitting for LVDS */
	if (dev_priv->pch_pf_size &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	if (is_pch_port)
		ironlake_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_crtc_update_cursor(crtc, true);
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	if (!intel_crtc->active)
		return;

	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	ironlake_fdi_disable(crtc);

	/* This is a horrible layering violation; we should be doing this in
	 * the connector/encoder ->prepare instead, but we don't always have
	 * enough information there about the config to know whether it will
	 * actually be necessary or just cause undesired flicker.
	 */
	intel_disable_pch_ports(dev_priv, pipe);

	intel_disable_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* C shares PLL A or B */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		default:
			BUG(); /* wtf */
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	if (!intel_crtc->no_pll)
		intel_disable_pch_pll(dev_priv, pipe);

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	intel_clear_scanline_wait(dev);
	mutex_unlock(&dev->struct_mutex);
}
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
		ironlake_crtc_enable(crtc);
		break;

	case DRM_MODE_DPMS_OFF:
		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
		ironlake_crtc_disable(crtc);
		break;
	}
}
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
	intel_clear_scanline_wait(dev);
}
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		i9xx_crtc_enable(crtc);
		break;
	case DRM_MODE_DPMS_OFF:
		i9xx_crtc_disable(crtc);
		break;
	}
}
/**
 * Sets the power management mode of the pipe and plane.
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	dev_priv->display.dpms(crtc, mode);

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will change)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	i9xx_crtc_disable(crtc);
}

static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}

static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}

static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}

void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;

	if (HAS_PCH_SPLIT(dev)) {
		/* FDI link clock is fixed at 2.7G */
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
			return false;
	}

	/* XXX some encoders set the crtcinfo, others don't.
	 * Obviously we need some form of conflict resolution here...
	 */
	if (adjusted_mode->crtc_htotal == 0)
		drm_mode_set_crtcinfo(adjusted_mode, 0);

	return true;
}
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
struct fdi_m_n {
	u32 tu;
	u32 gmch_m;
	u32 gmch_n;
	u32 link_m;
	u32 link_n;
};

static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}
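/*
 * The GMCH M/N pair below encodes the ratio of payload bandwidth
 * (bits-per-pixel * pixel clock) to total FDI link bandwidth
 * (link clock * nlanes * 8 bits); the link M/N pair encodes pixel clock
 * versus link clock directly.  fdi_reduce_ratio() keeps both ratios
 * below 2^24 so they fit the 24-bit M/N registers.  With illustrative
 * numbers (not taken from this file): 24 bpp at a 108000 kHz pixel
 * clock over 2 lanes of a 270000 kHz link gives
 * gmch_m/gmch_n = 2592000/4320000, i.e. 60% link utilization.
 */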
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
struct intel_watermark_params {
	unsigned long fifo_size;
	unsigned long max_wm;
	unsigned long default_wm;
	unsigned long guard_size;
	unsigned long cacheline_size;
};
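/* The initializers below are positional:
 * { fifo_size, max_wm, default_wm, guard_size, cacheline_size }
 */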
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
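/*
 * Worked example with illustrative numbers (not taken from any table in
 * this file): a 100000 kHz pixel clock at 4 bytes/pixel with 5000 ns of
 * latency drains (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes while a
 * fetch is outstanding; with 64-byte cachelines that rounds up to 32
 * entries, so a 96-entry FIFO with a guard of 2 yields a watermark of
 * 96 - (32 + 2) = 62.
 */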
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
struct cxsr_latency {
	int is_desktop;
	int is_ddr3;
	unsigned long fsb_freq;
	unsigned long mem_freq;
	unsigned long display_sr;
	unsigned long display_hpll_disable;
	unsigned long cursor_sr;
	unsigned long cursor_hpll_disable;
};
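/* Table columns, in struct order: is_desktop, is_ddr3, fsb_freq, mem_freq,
 * display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable.
 */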
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
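/*
 * DSPARB partitions the display FIFO between planes.  Each helper below
 * decodes the split for its generation and returns the given plane's
 * share, converted to cachelines on the generations that count in larger
 * units.
 */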
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
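/*
 * Note the inversion above: unlike the g4x path, the 965 self-refresh
 * watermark counts down from the top of the FIFO. As a hypothetical
 * illustration, if the FIFO holds 512 cachelines and a frame needs 120
 * of them to cover the latency window, srwm = 512 - 120 = 392.
 */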
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
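/*
 * Sketch of the FW_BLC packing above with hypothetical watermarks
 * planea_wm = 12, planeb_wm = 16 and the default cwm = 2:
 *
 *   fwater_lo = (16 << 16) | 12 | (1 << 24) | (1 << 8) = 0x0110010c
 *   fwater_hi = 2 | (1 << 8)                           = 0x00000102
 *
 * Bits 8 and 24 are the "8 cachelines per fetch" burst-length fields.
 */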
static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       crtc->fb->bits_per_pixel / 8,
				       latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has its own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%ld), disabling wm%d+\n",
			      display_wm, display->max_wm, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%ld), disabling wm%d+\n",
			      cursor_wm, cursor->max_wm, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}
/*
 * Compute watermark values of WM[1-3].
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
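/*
 * Hypothetical instance of the FBC formula above: with a final primary
 * watermark of 114 entries on a 1920-pixel-wide, 4-byte-per-pixel line
 * (line_size = 7680), FBC WM = DIV_ROUND_UP(114 * 64, 7680) + 2
 * = DIV_ROUND_UP(7296, 7680) + 2 = 1 + 2 = 3 lines.
 */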
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/* IVB has 3 pipes */
	if (IS_IVYBRIDGE(dev) &&
	    g4x_compute_wm0(dev, 2,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEC_IVB,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1 << 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
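/*
 * The "enabled" value doubles as a pipe bitmask and, after the
 * single_plane_enabled() check, as a plane index: e.g. with only pipe B
 * active, enabled == 2 is a power of two, and ffs(2) - 1 == 1 selects
 * plane 1 for the self-refresh calculation.
 */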
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size * display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;

	line_time_us = (sprite_width * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm <= 0x3ff;
}
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *     - lines are large relative to FIFO size (buffer can hold up to 2)
 *     - lines are small relative to FIFO size (buffer can hold more than 2
 *       lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
static void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}
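/*
 * Plugging hypothetical numbers into the normal formula above: a 148500 kHz
 * dot clock at 4 bytes per pixel with a 5000 ns latency needs roughly
 * 148500e3 * 4 * 5000e-9 ~= 2970 bytes of FIFO headroom, which the per-chip
 * update_wm() hooks then convert into cacheline-granular register values.
 */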
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
						   pixel_size);
}

static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915_panel_use_ssc >= 0)
		return i915_panel_use_ssc != 0;
	return dev_priv->lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 * @mode: requested mode
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.  Resolve that here:
 *   LVDS typically supports only 6bpc, so clamp down in that case
 *   HDMI supports only 8bpc or 12bpc, so round 10bpc up to 12bpc and clamp
 *     anything else to 8bpc
 *   Displays may support a restricted set as well, check EDID and clamp as
 *     appropriate.
 *   DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */
	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
		      bpc, display_bpc);

	*pipe_bpp = display_bpc * 3;

	return display_bpc != bpc;
}
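/*
 * Hypothetical walk-through: a depth-24 framebuffer (bpc = 8) driving a
 * panel whose EDID reports 6bpc gives display_bpc = min(6, 8) = 6, so
 * *pipe_bpp becomes 18 and the function returns true: the pipe must
 * dither the 8bpc framebuffer content down to the 6bpc link.
 */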
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}
static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
				      intel_clock_t *clock)
{
	/*
	 * SDVO TV has fixed PLL values that depend on its clock range;
	 * this mirrors the VBIOS setting.
	 */
	if (adjusted_mode->clock >= 100000
	    && adjusted_mode->clock < 140500) {
		clock->p1 = 2;
		clock->p2 = 10;
		clock->n = 3;
		clock->m1 = 16;
		clock->m2 = 8;
	} else if (adjusted_mode->clock >= 140500
		   && adjusted_mode->clock <= 200000) {
		clock->p1 = 1;
		clock->p2 = 10;
		clock->n = 6;
		clock->m1 = 12;
		clock->m2 = 8;
	}
}
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
				     intel_clock_t *clock,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = (1 << reduced_clock->n) << 16 |
				reduced_clock->m1 << 8 | reduced_clock->m2;
	} else {
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
				reduced_clock->m2;
	}

	I915_WRITE(FP0(pipe), fp);

	intel_crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
	}
}
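/*
 * FP register packing sketch (non-Pineview case, divisors borrowed from the
 * SDVO TV table above): n = 6, m1 = 12, m2 = 8 packs as
 *   fp = (6 << 16) | (12 << 8) | 8 = 0x00060c08.
 * Pineview differs only in storing the N divisor as a one-hot (1 << n) field.
 */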
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	/* Ironlake's plane is forced to pipe, bit 24 is to
	   enable color space conversion */
	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			temp |= LVDS_PIPEB_SELECT;
		} else {
			temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		/* set the dithering flag on LVDS as needed */
		if (INTEL_INFO(dev)->gen >= 4) {
			if (dev_priv->lvds_dither)
				temp |= LVDS_ENABLE_DITHER;
			else
				temp &= ~LVDS_ENABLE_DITHER;
		}
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(LVDS, temp);
	}

	if (is_dp) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	}

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vdisplay -= 1;
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_start -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		adjusted_mode->crtc_vsync_end -= 1;
		adjusted_mode->crtc_vsync_start -= 1;
	} else
		pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */

	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));
	intel_enable_plane(dev_priv, plane, pipe);

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
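/*
 * Example of the p1 bitmask computation used above (hypothetical divisor):
 * for clock.p1 = 2, (1 << (2 - 1)) = 0x2 is placed at
 * DPLL_FPA01_P1_POST_DIV_SHIFT, i.e. the P1 field is one-hot encoded rather
 * than stored as a plain binary value.
 */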
/*
 * Initialize reference clocks when the driver loads
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *edp_encoder = NULL;
	int num_connectors = 0;
	bool is_lvds = false;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			edp_encoder = encoder;
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      dev_priv->lvds_ssc_freq);
		return dev_priv->lvds_ssc_freq * 1000;
	}

	return 120000;
}
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, factor;
	unsigned int pipe_bpp;
	bool dither;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	/*
	 * SDVO TV has fixed PLL values that depend on its clock range;
	 * this mirrors the VBIOS setting.
	 */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}
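	/*
	 * Worked instance of the divider comment above (assuming
	 * intel_fdi_link_freq() reports 27, i.e. a 2.7 GHz link):
	 * 27 * MHz(100)/KHz(1)/10 = 270000, so each FDI lane can carry the
	 * equivalent of a 270 MHz pixel clock after 8b/10b overhead.
	 */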
	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
	switch (pipe_bpp) {
	case 18:
		temp |= PIPE_6BPC;
		break;
	case 24:
		temp |= PIPE_8BPC;
		break;
	case 30:
		temp |= PIPE_10BPC;
		break;
	case 36:
		temp |= PIPE_12BPC;
		break;
	default:
		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
		     pipe_bpp);
		temp |= PIPE_8BPC;
		pipe_bpp = 24;
		break;
	}

	intel_crtc->bpp = pipe_bpp;
	I915_WRITE(PIPECONF(pipe), temp);

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}
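	/*
	 * Hypothetical lane count: for a 148500 kHz mode at 24 bpp,
	 * bps = 148500 * 24 * 21 / 20 = 3742200, and with link_bw = 270000
	 * that gives lane = 3742200 / (270000 * 8) + 1 = 1 + 1 = 2 FDI lanes.
	 */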
  4976. intel_crtc->fdi_lanes = lane;
  4977. if (pixel_multiplier > 1)
  4978. link_bw *= pixel_multiplier;
  4979. ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
  4980. &m_n);
  4981. fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
  4982. if (has_reduced_clock)
  4983. fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
  4984. reduced_clock.m2;
  4985. /* Enable autotuning of the PLL clock (if permissible) */
  4986. factor = 21;
  4987. if (is_lvds) {
  4988. if ((intel_panel_use_ssc(dev_priv) &&
  4989. dev_priv->lvds_ssc_freq == 100) ||
  4990. (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
  4991. factor = 25;
  4992. } else if (is_sdvo && is_tv)
  4993. factor = 20;
  4994. if (clock.m < factor * clock.n)
  4995. fp |= FP_CB_TUNE;
  4996. dpll = 0;
  4997. if (is_lvds)
  4998. dpll |= DPLLB_MODE_LVDS;
  4999. else
  5000. dpll |= DPLLB_MODE_DAC_SERIAL;
  5001. if (is_sdvo) {
  5002. int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
  5003. if (pixel_multiplier > 1) {
  5004. dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
  5005. }
  5006. dpll |= DPLL_DVO_HIGH_SPEED;
  5007. }
  5008. if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
  5009. dpll |= DPLL_DVO_HIGH_SPEED;
  5010. /* compute bitmask from p1 value */
  5011. dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  5012. /* also FPA1 */
  5013. dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  5014. switch (clock.p2) {
  5015. case 5:
  5016. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
  5017. break;
  5018. case 7:
  5019. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
  5020. break;
  5021. case 10:
  5022. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
  5023. break;
  5024. case 14:
  5025. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
  5026. break;
  5027. }
  5028. if (is_sdvo && is_tv)
  5029. dpll |= PLL_REF_INPUT_TVCLKINBC;
  5030. else if (is_tv)
  5031. /* XXX: just matching BIOS for now */
  5032. /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
  5033. dpll |= 3;
  5034. else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
  5035. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  5036. else
  5037. dpll |= PLL_REF_INPUT_DREFCLK;
  5038. /* setup pipeconf */
  5039. pipeconf = I915_READ(PIPECONF(pipe));
  5040. /* Set up the display plane register */
  5041. dspcntr = DISPPLANE_GAMMA_ENABLE;
  5042. DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
  5043. drm_mode_debug_printmodeline(mode);
  5044. /* PCH eDP needs FDI, but CPU eDP does not */
  5045. if (!intel_crtc->no_pll) {
  5046. if (!has_edp_encoder ||
  5047. intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
  5048. I915_WRITE(PCH_FP0(pipe), fp);
  5049. I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
  5050. POSTING_READ(PCH_DPLL(pipe));
  5051. udelay(150);
  5052. }
  5053. } else {
  5054. if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
  5055. fp == I915_READ(PCH_FP0(0))) {
  5056. intel_crtc->use_pll_a = true;
  5057. DRM_DEBUG_KMS("using pipe a dpll\n");
  5058. } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
  5059. fp == I915_READ(PCH_FP0(1))) {
  5060. intel_crtc->use_pll_a = false;
  5061. DRM_DEBUG_KMS("using pipe b dpll\n");
  5062. } else {
  5063. DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
  5064. return -EINVAL;
  5065. }
  5066. }
  5067. /* The LVDS pin pair needs to be on before the DPLLs are enabled.
  5068. * This is an exception to the general rule that mode_set doesn't turn
  5069. * things on.
  5070. */
  5071. if (is_lvds) {
  5072. temp = I915_READ(PCH_LVDS);
  5073. temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
  5074. if (HAS_PCH_CPT(dev))
  5075. temp |= PORT_TRANS_SEL_CPT(pipe);
  5076. else if (pipe == 1)
  5077. temp |= LVDS_PIPEB_SELECT;
  5078. else
  5079. temp &= ~LVDS_PIPEB_SELECT;
  5080. /* set the corresponsding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if ((is_lvds && dev_priv->lvds_dither) || dither) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_SP;
	}
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!intel_crtc->no_pll &&
	    (!has_edp_encoder ||
	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
		I915_WRITE(PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(PCH_DPLL(pipe), dpll);
	}

	intel_crtc->lowfreq_avail = false;
	if (!intel_crtc->no_pll) {
		if (is_lvds && has_reduced_clock && i915_powersave) {
			I915_WRITE(PCH_FP1(pipe), fp2);
			intel_crtc->lowfreq_avail = true;
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
			}
		} else {
			I915_WRITE(PCH_FP1(pipe), fp);
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
			}
		}
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vdisplay -= 1;
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_start -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		adjusted_mode->crtc_vsync_end -= 1;
		adjusted_mode->crtc_vsync_start -= 1;
	} else
		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	if (IS_GEN5(dev)) {
		/* enable address swizzle for tiling buffer */
		temp = I915_READ(DISP_ARB_CTL);
		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
	}

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
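/*
 * Common mode-set entry point: brackets the per-platform crtc_mode_set
 * hook with vblank pre/post bookkeeping and records the resulting DPMS
 * state on the crtc.
 */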
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
					      x, y, old_fb);
	drm_vblank_post_modeset(dev, pipe);

	if (ret)
		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
	else
		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;

	return ret;
}
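/*
 * The ELD (EDID-Like Data) block describes the audio capabilities of the
 * attached sink.  This helper checks whether the ELD already programmed
 * into the hardware buffer matches connector->eld, so a redundant rewrite
 * (and the associated ELD-valid toggling) can be skipped.
 */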
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	if (!eld[0])
		return !i;

	if (!i)
		return false;

	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
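/*
 * The PCH variants (IBX, i.e. Ibex Peak, vs CPT, i.e. Cougar Point) keep
 * the audio registers in different banks; note the per-pipe register
 * stride of 0x100 applied below before the ELD bytes are written out as
 * dwords.
 */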
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_cntl_st;
	int aud_cntrl_st2;

	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
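/*
 * Note that eld[6] (aud_synch_delay) is written halved below:
 * drm_av_sync_delay() reports milliseconds, while the ELD field counts
 * in 2 ms steps.
 */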
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 drm_get_connector_name(connector),
			 connector->encoder->base.id,
			 drm_get_encoder_name(connector->encoder));

	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc);
}
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int palreg = PALETTE(intel_crtc->pipe);
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(intel_crtc->pipe);

	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}
}
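/*
 * The three cursor helpers below differ mainly in register layout:
 * i845/i865 share a single cursor (_CURACNTR/_CURABASE) and can only
 * change the base while the cursor is disabled, i9xx has per-pipe
 * CURCNTR/CURBASE with an explicit pipe-select field, and Ivybridge
 * uses its own CURCNTR_IVB/CURBASE_IVB bank without pipe select.
 */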
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE_IVB(pipe), base);
}
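/*
 * Cursor position is programmed as sign-magnitude per axis: the code
 * below writes |x| and |y| into the position fields and sets
 * CURSOR_POS_SIGN for negative coordinates, clearing the base (hiding
 * the cursor) once it is entirely off screen.
 */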
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}

	if (visible)
		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
				 struct drm_file *file,
				 uint32_t handle,
				 uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	uint32_t addr;
	int ret;

	DRM_DEBUG_KMS("\n");

	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		DRM_ERROR("we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (obj->base.size < width * height * 4) {
		DRM_ERROR("buffer is too small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!dev_priv->info->cursor_needs_physical) {
		if (obj->tiling_mode) {
			DRM_ERROR("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
		if (ret) {
			DRM_ERROR("failed to move cursor bo into the GTT\n");
			goto fail_locked;
		}

		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			DRM_ERROR("failed to release fence for cursor");
			goto fail_unpin;
		}

		addr = obj->gtt_offset;
	} else {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_attach_phys_object(dev, obj,
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
						  align);
		if (ret) {
			DRM_ERROR("failed to attach phys object\n");
			goto fail_locked;
		}
		addr = obj->phys_obj->handle->busaddr;
	}

	if (IS_GEN2(dev))
		I915_WRITE(CURSIZE, (height << 12) | width);

 finish:
	if (intel_crtc->cursor_bo) {
		if (dev_priv->info->cursor_needs_physical) {
			if (intel_crtc->cursor_bo != obj)
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
		} else
			i915_gem_object_unpin(intel_crtc->cursor_bo);
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
	}

	mutex_unlock(&dev->struct_mutex);

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
	intel_crtc->cursor_width = width;
	intel_crtc->cursor_height = height;

	intel_crtc_update_cursor(crtc, true);

	return 0;
fail_unpin:
	i915_gem_object_unpin(obj);
fail_locked:
	mutex_unlock(&dev->struct_mutex);
fail:
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->cursor_x = x;
	intel_crtc->cursor_y = y;

	intel_crtc_update_cursor(crtc, true);

	return 0;
}

/** Sets the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			     u16 blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->lut_r[regno] = red >> 8;
	intel_crtc->lut_g[regno] = green >> 8;
	intel_crtc->lut_b[regno] = blue >> 8;
}

void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	*red = intel_crtc->lut_r[regno] << 8;
	*green = intel_crtc->lut_g[regno] << 8;
	*blue = intel_crtc->lut_b[regno] << 8;
}

static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
/**
 * Get a pipe with a simple mode set on it for doing load-based monitor
 * detection.
 *
 * It will be up to the load-detect code to adjust the pipe as appropriate for
 * its requirements.  The pipe will be connected to no other encoders.
 *
 * Currently this code will only succeed if there is a pipe with no encoders
 * configured for it.  In the future, it could choose to temporarily disable
 * some outputs to free up a pipe for its use.
 *
 * \return crtc, or NULL if no pipes are available.
 */

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(&obj->base);
		kfree(intel_fb);
		return ERR_PTR(ret);
	}

	return &intel_fb->base;
}

static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
}
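/*
 * Worked example for the 640x480 load-detect mode at 32 bpp:
 * pitch = ALIGN(640 * 32 / 8, 64) = 2560 bytes, so the allocation is
 * ALIGN(2560 * 480, PAGE_SIZE) = 1228800 bytes (exactly 300 pages with
 * a 4 KiB PAGE_SIZE).
 */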
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd;

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = 0;

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}

static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (dev_priv->fbdev == NULL)
		return NULL;

	obj = dev_priv->fbdev->ifb.obj;
	if (obj == NULL)
		return NULL;

	fb = &dev_priv->fbdev->ifb.base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
}
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
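/*
 * Anything borrowed by intel_get_load_detect_pipe() must be given back
 * with intel_release_load_detect_pipe() below, which drops the temporary
 * framebuffer (if one was created) and restores the previous DPMS state.
 */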
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				      "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100MHz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */
	return clock.dot;
}
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->clock = intel_crtc_clock_get(dev, crtc);
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
#define GPU_IDLE_TIMEOUT 500 /* ms */

/* When this timer fires, we've been idle for a while */
static void intel_gpu_idle_timer(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.active_list)) {
		/* Still processing requests, so just re-arm the timer. */
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
		return;
	}

	dev_priv->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}

#define CRTC_IDLE_TIMEOUT 1000 /* ms */

static void intel_crtc_idle_timer(unsigned long arg)
{
	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU. */
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
		return;
	}

	intel_crtc->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
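/*
 * LVDS clock switching: when a reduced dot clock was programmed into the
 * FP1 divisors at mode-set time, DISPLAY_RATE_SELECT_FPA1 in the DPLL
 * selects between the full-rate (FP0) and reduced (FP1) dividers.  The
 * two helpers below flip that bit, driven by the idle timers above.
 */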
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll = I915_READ(dpll_reg);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
			   PANEL_UNLOCK_REGS);

		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}
}
/**
 * intel_idle_update - adjust clocks for idleness
 * @work: work struct
 *
 * Either the GPU or display (or both) went idle.  Check the busy status
 * here and adjust the CRTC and GPU clocks as necessary.
 */
static void intel_idle_update(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    idle_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	if (!i915_powersave)
		return;

	mutex_lock(&dev->struct_mutex);

	i915_update_gfx_val(dev_priv);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		if (!intel_crtc->busy)
			intel_decrease_pllclock(crtc);
	}

	mutex_unlock(&dev->struct_mutex);
}
/**
 * intel_mark_busy - mark the GPU and possibly the display busy
 * @dev: drm device
 * @obj: object we're operating on
 *
 * Callers can use this function to indicate that the GPU is busy processing
 * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
 * buffer), we'll also mark the display as busy, so we know to increase its
 * clock frequency.
 */
void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL;
	struct intel_framebuffer *intel_fb;
	struct intel_crtc *intel_crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	if (!dev_priv->busy)
		dev_priv->busy = true;
	else
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_fb = to_intel_framebuffer(crtc->fb);
		if (intel_fb->obj == obj) {
			if (!intel_crtc->busy) {
				/* Non-busy -> busy, upclock */
				intel_increase_pllclock(crtc);
				intel_crtc->busy = true;
			} else {
				/* Busy -> busy, put off timer */
				mod_timer(&intel_crtc->idle_timer, jiffies +
					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
			}
		}
	}
}
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);

	mutex_lock(&work->dev->struct_mutex);
	i915_gem_object_unpin(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(work->dev);
	mutex_unlock(&work->dev->struct_mutex);
	kfree(work);
}
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	struct drm_pending_vblank_event *e;
	struct timeval tnow, tvbl;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	do_gettimeofday(&tnow);

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	if (work == NULL || !work->pending) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event) {
		e = work->event;
		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

		/* Called before vblank count and timestamps have
		 * been updated for the vblank interval of flip
		 * completion?  Need to increment vblank count and
		 * add one videorefresh duration to returned timestamp
		 * to account for this.  We assume this happened if we
		 * get called over 0.9 frame durations after the last
		 * timestamped vblank.
		 *
		 * This calculation can not be used with vrefresh rates
		 * below 5Hz (10Hz to be on the safe side) without
		 * promoting to 64-bit integers.
		 */
		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
		    9 * crtc->framedur_ns) {
			e->event.sequence++;
			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
					     crtc->framedur_ns);
		}

		e->event.tv_sec = tvbl.tv_sec;
		e->event.tv_usec = tvbl.tv_usec;

		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj = work->old_fb_obj;

	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);
	if (atomic_read(&obj->pending_flip) == 0)
		wake_up(&dev_priv->pending_flip_queue);
	schedule_work(&work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
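/*
 * Example of the 0.9-frame heuristic above: at 60 Hz the frame duration
 * is ~16.67 ms, so a completion processed more than ~15 ms after the
 * last timestamped vblank is attributed to the following vblank, and the
 * reported sequence and timestamp are bumped by one frame.
 */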
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		if ((++intel_crtc->unpin_work->pending) > 1)
			DRM_ERROR("Prepared flip multiple times\n");
	} else {
		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto out;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();
out:
	return ret;
}
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto out;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP_I915 |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(MI_NOOP);

	ADVANCE_LP_RING();
out:
	return ret;
}
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto out;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset | obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);
	ADVANCE_LP_RING();
out:
	return ret;
}
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto out;

	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0] | obj->tiling_mode);
	OUT_RING(obj->gtt_offset);

	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);
	ADVANCE_LP_RING();
out:
	return ret;
}
/*
 * On gen7 we currently use the blit ring because (in early silicon at least)
 * the render ring doesn't give us interrupts for page flip completion, which
 * means clients will hang after the first flip is queued.  Fortunately the
 * blit ring generates interrupts properly, so use it instead.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto out;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto out;

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, (obj->gtt_offset));
	intel_ring_emit(ring, (MI_NOOP));
	intel_ring_advance(ring);
out:
	return ret;
}
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}
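/*
 * dev_priv->display.queue_flip points at one of the gen-specific helpers
 * above; intel_default_queue_flip is the fallback for hardware where
 * MI_DISPLAY_FLIP-based flipping is not implemented and simply fails the
 * request with -ENODEV.
 */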
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->dev = crtc->dev;
	intel_fb = to_intel_framebuffer(crtc->fb);
	work->old_fb_obj = intel_fb->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	/* Block clients from rendering to the new back buffer until
	 * the flip occurs and the object is no longer visible.
	 */
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;

	intel_disable_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	kfree(work);

	return ret;
}
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
static void intel_crtc_reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Reset flags back to the 'unknown' status so that they
	 * will be correctly set on the initial modeset.
	 */
	intel_crtc->dpms_mode = -1;

	/* We need to fix up any BIOS configuration that conflicts with
	 * our expectations.
	 */
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}

static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	if (HAS_PCH_SPLIT(dev)) {
		if (pipe == 2 && IS_IVYBRIDGE(dev))
			intel_crtc->no_pll = true;
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
		    (unsigned long)intel_crtc);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_mode_object *drmmode_obj;
	struct intel_crtc *crtc;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
					   DRM_MODE_OBJECT_CRTC);

	if (!drmmode_obj) {
		DRM_ERROR("no such CRTC id\n");
		return -EINVAL;
	}

	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

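/* Build a bitmask of encoder indices whose clone_mask overlaps the given
 * type_mask; the result is used to fill drm_encoder.possible_clones in
 * intel_setup_outputs() below.
 */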
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
	struct intel_encoder *encoder;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		if (type_mask & encoder->clone_mask)
			index_mask |= (1 << entry);
		entry++;
	}

	return index_mask;
}

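/* Report whether the CPU eDP port (DP_A) is usable: mobile parts only,
 * the port must be strapped as detected, and on Ironlake it must not be
 * fused off.
 */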
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) &&
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

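/* Probe the fixed set of possible outputs for this platform, register an
 * encoder for each one that responds, and then compute the legal
 * crtc/clone combinations for every encoder found.
 */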
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds = false;

	if (IS_MOBILE(dev) && !IS_I830(dev))
		has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X, SDVOC doesn't have its own detect register */
		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	if (HAS_PCH_SPLIT(dev))
		ironlake_init_pch_refclk(dev);
}

static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);

	kfree(intel_fb);
}

static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	return drm_gem_handle_create(file, &obj->base, handle);
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};

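/* Validate a framebuffer request against the display engine's limits --
 * no Y tiling for scanout, pitch aligned to 64 bytes, and a pixel format
 * the hardware can actually scan out -- before wrapping the GEM object.
 */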
int intel_framebuffer_init(struct drm_device *dev,
			   struct intel_framebuffer *intel_fb,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->tiling_mode == I915_TILING_Y)
		return -EINVAL;

	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		/* RGB formats are common across chipsets */
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_ERROR("unsupported pixel format\n");
		return -EINVAL;
	}

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	return 0;
}

static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};

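/* Allocate, pin and move to the GTT domain a single page to be used as a
 * hardware context; the Ironlake RC6 code uses this for the render and
 * power contexts. The caller must hold struct_mutex.
 */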
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_object_pin(ctx, 4096, true);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	/* struct_mutex belongs to the caller (asserted on entry), so do not
	 * drop it here -- unlocking in the error path would unbalance the
	 * caller's locking.
	 */
	return NULL;
}

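/* Request a new memory frequency on Ironlake by writing MEMSWCTL; returns
 * false without touching anything if the previous command is still
 * pending.
 */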
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}

void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);
}

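/* Hand a new render P-state request to the hardware by writing the
 * frequency field of GEN6_RPNSWREQ.
 */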
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 swreq;

	swreq = (val & 0x3ff) << 25;
	I915_WRITE(GEN6_RPNSWREQ, swreq);
}

void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);

	/* Masking all PM interrupts here cannot race with the rps work item
	 * re-unmasking them, since the work item uses a different register
	 * (PMIMR) to mask PM interrupts. The only risk is leaving stale bits
	 * in PMIIR and PMIMR, which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}

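/* Decode one PXVFREQ table entry into a frequency:
 * freq = (div * 133333) / (2^post * pre); a zero pre-divider marks an
 * unused entry.
 */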
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

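/* Program the energy monitor (EMON): per-event and per-P-state energy
 * weights, plus the correction factor read from the fuses; the results
 * feed the driver's chipset/graphics power estimates.
 */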
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}

static bool intel_enable_rc6(struct drm_device *dev)
{
	/*
	 * Respect the kernel parameter if it is set
	 */
	if (i915_enable_rc6 >= 0)
		return i915_enable_rc6;

	/*
	 * Disable RC6 on Ironlake
	 */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	/*
	 * Enable rc6 on Sandybridge if DMA remapping is disabled
	 */
	if (INTEL_INFO(dev)->gen == 6) {
		DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
				 intel_iommu_enabled ? "true" : "false",
				 !intel_iommu_enabled ? "en" : "dis");
		return !intel_iommu_enabled;
	}
	DRM_DEBUG_DRIVER("RC6 enabled\n");
	return 1;
}

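/* Bring up GEN6 render P-states (RPS) and RC6: program the RC6 thresholds
 * and turbo limits, query the PCU mailbox for overclocking headroom, and
 * unmask the PM interrupts that drive frequency changes at runtime.
 */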
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	if (intel_enable_rc6(dev_priv->dev))
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		/* Log the decoded limit, not the raw mailbox word */
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}

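/* For each GPU frequency between the RPS limits, tell the PCU which IA
 * (CPU) frequency it should pair with that GPU frequency for memory
 * access; lower GPU frequencies get a proportionally lower IA frequency.
 */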
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}

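/* Drop the pinned render and power context pages used by Ironlake RC6. */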
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}

static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}

static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx == NULL)
		dev_priv->renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->renderctx)
		return -ENOMEM;

	if (dev_priv->pwrctx == NULL)
		dev_priv->pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}

void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush; combined with the MI_FLUSH above, it should
	 * be safe to assume that renderctx is valid.
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}

/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.dpms = ironlake_crtc_dpms;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.dpms = i9xx_crtc_dpms;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev)) {
			u32 ecobus;

			/* A small trick here - if the bios hasn't configured MT forcewake,
			 * and if the device is in RC6, then force_wake_mt_get will not wake
			 * the device and the ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			mutex_lock(&dev->struct_mutex);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			mutex_unlock(&dev->struct_mutex);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}

struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

struct intel_quirk intel_quirks[] = {
	/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
	{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};

static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(1, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = (void *)&intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_display(dev);

	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
		if (HAS_PCH_SPLIT(dev)) {
			ret = intel_plane_init(dev, i);
			if (ret)
				DRM_ERROR("plane %d init failed: %d\n",
					  i, ret);
		}
	}

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}

	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);
}

void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

	intel_setup_overlay(dev);
}

void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	drm_kms_helper_poll_fini(dev);
	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (IS_GEN6(dev) || IS_GEN7(dev))
		gen6_disable_rps(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Disable the irq before mode object teardown, because the irq
	 * handler might enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_work_sync(&dev_priv->rps_work);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* Shut off idle work before the crtcs get freed. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		del_timer_sync(&intel_crtc->idle_timer);
	}
	del_timer_sync(&dev_priv->idle_timer);
	cancel_work_sync(&dev_priv->idle_work);

	drm_mode_config_cleanup(dev);
}

/*
 * Return the encoder currently attached to the connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 gmch_ctrl;

	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
	return 0;
}

#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>

struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[2];

	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[2];
};

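/* Snapshot the cursor, plane and pipe registers for both pipes; called
 * from the error-capture path, hence the GFP_ATOMIC allocation.
 */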
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int i;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	for (i = 0; i < 2; i++) {
		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		error->plane[i].size = I915_READ(DSPSIZE(i));
		error->plane[i].pos = I915_READ(DSPPOS(i));
		error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].conf = I915_READ(PIPECONF(i));
		error->pipe[i].source = I915_READ(PIPESRC(i));
		error->pipe[i].htotal = I915_READ(HTOTAL(i));
		error->pipe[i].hblank = I915_READ(HBLANK(i));
		error->pipe[i].hsync = I915_READ(HSYNC(i));
		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
		error->pipe[i].vblank = I915_READ(VBLANK(i));
		error->pipe[i].vsync = I915_READ(VSYNC(i));
	}

	return error;
}

void
intel_display_print_error_state(struct seq_file *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	for (i = 0; i < 2; i++) {
		seq_printf(m, "Pipe [%d]:\n", i);
		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

		seq_printf(m, "Plane [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		seq_printf(m, "Cursor [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}
}
#endif