
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i965[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
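
/*
 * Each table above maps an hpd pin (HPD_CRT, HPD_PORT_B, ...) to the
 * platform-specific hotplug interrupt bit for that pin; a zero entry means
 * the pin does not exist on that platform.  A minimal sketch of the lookup
 * pattern used by hotplug_irq_storm_detect() further down (illustrative
 * only, not compiled):
 */
#if 0
/* Did port B fire?  Check its platform bit against the triggered bits. */
if (hpd_ibx[HPD_PORT_B] & hotplug_trigger)
        ;       /* port B hotplug seen on an IBX PCH */
#endif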
static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}
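
/*
 * DEIMR is a mask register: a bit set to 1 blocks the corresponding display
 * interrupt.  The two helpers above therefore keep a shadow copy in
 * dev_priv->irq_mask and only touch the register when the mask actually
 * changes, flushing the write with a posting read.  Illustrative usage
 * (not part of this file):
 */
#if 0
ironlake_enable_display_irq(dev_priv, DE_PIPEA_VBLANK);  /* unmask */
ironlake_disable_display_irq(dev_priv, DE_PIPEA_VBLANK); /* mask again */
#endif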
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        enum pipe pipe;

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->cpu_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->pch_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;

        if (enable)
                ironlake_enable_display_irq(dev_priv, bit);
        else
                ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                if (!ivb_can_enable_err_int(dev))
                        return;

                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
                                         ERR_INT_FIFO_UNDERRUN_B |
                                         ERR_INT_FIFO_UNDERRUN_C);

                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
        }
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
                                            bool enable)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
                                                SDE_TRANSB_FIFO_UNDER;

        if (enable)
                I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
        else
                I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

        POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                if (!cpt_can_enable_serr_int(dev))
                        return;

                I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
                                     SERR_INT_TRANS_B_FIFO_UNDERRUN |
                                     SERR_INT_TRANS_C_FIFO_UNDERRUN);

                I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
        } else {
                I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
        }

        POSTING_READ(SDEIMR);
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                           enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->cpu_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->cpu_fifo_underrun_disabled = !enable;

        if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}
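
/*
 * Sketch of an assumed caller (modeset code elsewhere in the driver, not
 * shown here): reporting is turned off across an operation that provokes
 * spurious underruns, then restored to the previous state that was
 * returned.  Illustrative only:
 */
#if 0
bool was_enabled;

was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
/* ... reconfigure the pipe; underruns here are expected and harmless ... */
intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
#endif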
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           enum transcoder pch_transcoder,
                                           bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe p;
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        unsigned long flags;
        bool ret;

        if (HAS_PCH_LPT(dev)) {
                crtc = NULL;
                for_each_pipe(p) {
                        struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
                        if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
                                crtc = c;
                                break;
                        }
                }
                if (!crtc) {
                        DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
                        return false;
                }
        } else {
                crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
        }
        intel_crtc = to_intel_crtc(crtc);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->pch_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->pch_fifo_underrun_disabled = !enable;

        if (HAS_PCH_IBX(dev))
                ibx_set_fifo_underrun_reporting(intel_crtc, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        if ((pipestat & mask) == mask)
                return;

        /* Enable the interrupt, clear any pending status */
        pipestat |= mask | (mask >> 16);
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        if ((pipestat & mask) == 0)
                return;

        pipestat &= ~mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}
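
/*
 * PIPESTAT packs enable bits in the high 16 bits and the matching status
 * bits 16 positions lower, which is why the helpers above mask the read
 * with 0x7fff0000 (keep enables, drop stale status) and mirror the enable
 * mask into the status half with (mask >> 16) to ack anything pending.
 * Illustrative only:
 */
#if 0
u32 enable_bit = PIPE_VBLANK_INTERRUPT_ENABLE;   /* lives in the high half */
u32 status_bit = enable_bit >> 16;               /* its status twin */
i915_enable_pipestat(dev_priv, pipe, enable_bit); /* also acks status_bit */
#endif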
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        low >>= PIPE_FRAME_LOW_SHIFT;
        return (high1 << 8) | low;
}
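
/*
 * The frame counter is split across two registers whose fields aren't
 * latched together, hence the high/low/high re-read loop above.  The same
 * stable-read pattern in the abstract (illustrative pseudo-helpers, not
 * real registers):
 */
#if 0
do {
        hi1 = read_frame_high();
        lo  = read_frame_low();
        hi2 = read_frame_high();
} while (hi1 != hi2);           /* lo is now consistent with hi1 */
frame = (hi1 << 8) | lo;        /* low field contributes 8 bits here */
#endif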
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                                    int *vpos, int *hpos)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* Get vtotal. */
        vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = I915_READ(PIPEDSL(pipe));

                /* Decode into vertical scanout position. Don't have
                 * horizontal scanout position.
                 */
                *vpos = position & 0x1fff;
                *hpos = 0;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* Query vblank area. */
        vbl = I915_READ(VBLANK(cpu_transcoder));

        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
        vbl_end = (vbl >> 16) & 0x1fff;

        if ((*vpos < vbl_start) || (*vpos > vbl_end))
                in_vbl = false;

        /* Inside "upper part" of vblank area? Apply corrective offset: */
        if (in_vbl && (*vpos >= vbl_start))
                *vpos = *vpos - vtotal;

        /* Readouts valid? */
        if (vbl > 0)
                ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}
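
/*
 * Worked example for the pre-gen4 branch above (illustrative numbers):
 * with a raw pixel count position = 123456 and htotal = 2200,
 * *vpos = 123456 / 2200 = 56 and *hpos = 123456 - 56 * 2200 = 256.
 */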
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                     int *max_error,
                                     struct timeval *vblank_time,
                                     unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
                      connector->base.id,
                      drm_get_connector_name(connector),
                      old_status, connector->status);
        return (old_status != connector->status);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        /* HPD irq before everything is fully set up. */
        if (!dev_priv->enable_hotplug_processing)
                return;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                 drm_get_connector_name(connector));
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      drm_get_connector_name(connector), intel_encoder->hpd_pin);
                }
        }
        /* If there were no outputs to poll, polling was disabled, so make
         * sure it gets re-enabled now that we are disabling HPD on some
         * connectors. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_timer(&dev_priv->hotplug_reenable_timer,
                          jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}
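
/*
 * Note the two-phase structure above: hpd_event_bits is snapshotted and
 * storm-afflicted pins are demoted to polling under irq_lock, while the
 * ->hot_plug() hooks and connector detect() calls run afterwards under
 * only mode_config.mutex, since they may sleep.
 */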
static void ironlake_handle_rps_change(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;
        unsigned long flags;

        spin_lock_irqsave(&mchdev_lock, flags);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock_irqrestore(&mchdev_lock, flags);

        return;
}
static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (ring->obj == NULL)
                return;

        trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

        wake_up_all(&ring->irq_queue);
        if (i915_enable_hangcheck) {
                dev_priv->gpu_error.hangcheck_count = 0;
                mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                          round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir, pm_imr;
        u8 new_delay;

        spin_lock_irq(&dev_priv->rps.lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
        I915_WRITE(GEN6_PMIMR, 0);
        spin_unlock_irq(&dev_priv->rps.lock);

        if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
                new_delay = dev_priv->rps.cur_delay + 1;
        else
                new_delay = dev_priv->rps.cur_delay - 1;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        if (!(new_delay > dev_priv->rps.max_delay ||
              new_delay < dev_priv->rps.min_delay)) {
                if (IS_VALLEYVIEW(dev_priv->dev))
                        valleyview_set_rps(dev_priv->dev, new_delay);
                else
                        gen6_set_rps(dev_priv->dev, new_delay);
        }

        if (IS_VALLEYVIEW(dev_priv->dev)) {
                /*
                 * On VLV, when we enter RC6 we may not be at the minimum
                 * voltage level, so arm a timer to check. It should only
                 * fire when there's activity or once after we've entered
                 * RC6, and then won't be re-armed until the next RPS interrupt.
                 */
                mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
                                 msecs_to_jiffies(100));
        }

        mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[5];
        uint32_t misccpctl;
        unsigned long flags;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        error_status = I915_READ(GEN7_L3CDERRST1);
        row = GEN7_PARITY_ERROR_ROW(error_status);
        bank = GEN7_PARITY_ERROR_BANK(error_status);
        subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

        I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
                                    GEN7_L3CDERRST1_ENABLE);
        POSTING_READ(GEN7_L3CDERRST1);

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);

        parity_event[0] = "L3_PARITY_ERROR=1";
        parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
        parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
        parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
        parity_event[4] = NULL;

        kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
                           KOBJ_CHANGE, parity_event);

        DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
                  row, bank, subbank);

        kfree(parity_event[3]);
        kfree(parity_event[2]);
        kfree(parity_event[1]);
}
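
/*
 * A userspace listener for the uevent sent above sees environment
 * variables of the following shape (values illustrative):
 *
 *   L3_PARITY_ERROR=1
 *   ROW=12
 *   BANK=1
 *   SUBBANK=0
 */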
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long flags;

        if (!HAS_L3_GPU_CACHE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
                      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GEN6_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
                      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }

        if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
                ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                                u32 pm_iir)
{
        unsigned long flags;

        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. The warning
         * displays a case where we've unsafely cleared
         * dev_priv->rps.pm_iir. Although missing an interrupt of the same
         * type is not a problem, it indicates a problem in the logic.
         *
         * The mask bit in IMR is cleared by dev_priv->rps.work.
         */

        spin_lock_irqsave(&dev_priv->rps.lock, flags);
        dev_priv->rps.pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
        POSTING_READ(GEN6_PMIMR);
        spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

        queue_work(dev_priv->wq, &dev_priv->rps.work);
}
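
/*
 * Hand-off sketch: this function runs in hard irq context, masks further PM
 * interrupts by writing the accumulated bits into GEN6_PMIMR, and queues
 * rps.work; gen6_pm_rps_work() above later snapshots rps.pm_iir, clears
 * GEN6_PMIMR, and does the actual frequency change in process context.
 */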
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
                                            u32 hotplug_trigger,
                                            const u32 *hpd)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;
        int i;
        bool ret = false;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        for (i = 1; i < HPD_NUM_PINS; i++) {
                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;

                dev_priv->hpd_event_bits |= (1 << i);
                if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
                        DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
                        ret = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
                }
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return ret;
}
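
/*
 * Storm accounting, illustrated: hpd_cnt for a pin resets whenever more
 * than HPD_STORM_DETECT_PERIOD ms separate two of its interrupts, and once
 * the count exceeds HPD_STORM_THRESHOLD within a period the pin is marked
 * HPD_MARK_DISABLED, its event bit is dropped, and the caller switches the
 * connector over to polling (see i915_hotplug_work_func() above).
 */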
static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        unsigned long irqflags;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];

        atomic_inc(&dev_priv->irq_received);

        while (true) {
                iir = I915_READ(VLV_IIR);
                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                snb_gt_irq_handler(dev, dev_priv, gt_iir);

                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                                drm_handle_vblank(dev, pipe);

                        if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }
                }

                /* Consume port. Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_trigger) {
                                if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
                        }
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);

                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                I915_WRITE(VLV_IIR, iir);
        }

out:
        return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

        if (hotplug_trigger) {
                if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
                        ibx_hpd_irq_setup(dev);
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        }

        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
                               SDE_AUDIO_POWER_SHIFT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 err_int = I915_READ(GEN7_ERR_INT);

        if (err_int & ERR_INT_POISON)
                DRM_ERROR("Poison interrupt\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_A)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
                        DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_B)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
                        DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_C)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
                        DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

        I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 serr_int = I915_READ(SERR_INT);

        if (serr_int & SERR_INT_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

        I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

        if (hotplug_trigger) {
                if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
                        ibx_hpd_irq_setup(dev);
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        }

        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                               SDE_AUDIO_POWER_SHIFT_CPT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK_CPT)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS_CPT)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

        if (pch_iir & SDE_FDI_MASK_CPT)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & SDE_ERROR_CPT)
                cpt_serr_int_handler(dev);
}
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
        int i;

        atomic_inc(&dev_priv->irq_received);

        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        if (IS_HASWELL(dev) &&
            (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be able
         * to process them after we restore SDEIER (as soon as we restore it,
         * we'll get an interrupt if SDEIIR still has something to process due
         * to its back queue). */
        if (!HAS_PCH_NOP(dev)) {
                sde_ier = I915_READ(SDEIER);
                I915_WRITE(SDEIER, 0);
                POSTING_READ(SDEIER);
        }

        /* On Haswell, also mask ERR_INT because we don't want to risk
         * generating "unclaimed register" interrupts from inside the interrupt
         * handler. */
        if (IS_HASWELL(dev))
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
        }

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                if (de_iir & DE_ERR_INT_IVB)
                        ivb_err_int_handler(dev);

                if (de_iir & DE_AUX_CHANNEL_A_IVB)
                        dp_aux_irq_handler(dev);

                if (de_iir & DE_GSE_IVB)
                        intel_opregion_asle_intr(dev);

                for (i = 0; i < 3; i++) {
                        if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
                                drm_handle_vblank(dev, i);
                        if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
                                intel_prepare_page_flip(dev, i);
                                intel_finish_page_flip_plane(dev, i);
                        }
                }

                /* check event from PCH */
                if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
                        u32 pch_iir = I915_READ(SDEIIR);

                        cpt_irq_handler(dev, pch_iir);

                        /* clear PCH hotplug event before clear CPU irq */
                        I915_WRITE(SDEIIR, pch_iir);
                }

                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
        }

        pm_iir = I915_READ(GEN6_PMIIR);
        if (pm_iir) {
                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                ret = IRQ_HANDLED;
        }

        if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev))
                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);

        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
        if (!HAS_PCH_NOP(dev)) {
                I915_WRITE(SDEIER, sde_ier);
                POSTING_READ(SDEIER);
        }

        return ret;
}
  1026. static void ilk_gt_irq_handler(struct drm_device *dev,
  1027. struct drm_i915_private *dev_priv,
  1028. u32 gt_iir)
  1029. {
  1030. if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
  1031. notify_ring(dev, &dev_priv->ring[RCS]);
  1032. if (gt_iir & GT_BSD_USER_INTERRUPT)
  1033. notify_ring(dev, &dev_priv->ring[VCS]);
  1034. }
  1035. static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  1036. {
  1037. struct drm_device *dev = (struct drm_device *) arg;
  1038. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1039. int ret = IRQ_NONE;
  1040. u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
  1041. atomic_inc(&dev_priv->irq_received);
  1042. /* disable master interrupt before clearing iir */
  1043. de_ier = I915_READ(DEIER);
  1044. I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
  1045. POSTING_READ(DEIER);
  1046. /* Disable south interrupts. We'll only write to SDEIIR once, so further
  1047. * interrupts will will be stored on its back queue, and then we'll be
  1048. * able to process them after we restore SDEIER (as soon as we restore
  1049. * it, we'll get an interrupt if SDEIIR still has something to process
  1050. * due to its back queue). */
  1051. sde_ier = I915_READ(SDEIER);
  1052. I915_WRITE(SDEIER, 0);
  1053. POSTING_READ(SDEIER);
  1054. de_iir = I915_READ(DEIIR);
  1055. gt_iir = I915_READ(GTIIR);
  1056. pm_iir = I915_READ(GEN6_PMIIR);
  1057. if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
  1058. goto done;
  1059. ret = IRQ_HANDLED;
  1060. if (IS_GEN5(dev))
  1061. ilk_gt_irq_handler(dev, dev_priv, gt_iir);
  1062. else
  1063. snb_gt_irq_handler(dev, dev_priv, gt_iir);
  1064. if (de_iir & DE_AUX_CHANNEL_A)
  1065. dp_aux_irq_handler(dev);
  1066. if (de_iir & DE_GSE)
  1067. intel_opregion_asle_intr(dev);
  1068. if (de_iir & DE_PIPEA_VBLANK)
  1069. drm_handle_vblank(dev, 0);
  1070. if (de_iir & DE_PIPEB_VBLANK)
  1071. drm_handle_vblank(dev, 1);
  1072. if (de_iir & DE_POISON)
  1073. DRM_ERROR("Poison interrupt\n");
  1074. if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
  1075. if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
  1076. DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
  1077. if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
  1078. if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
  1079. DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
  1080. if (de_iir & DE_PLANEA_FLIP_DONE) {
  1081. intel_prepare_page_flip(dev, 0);
  1082. intel_finish_page_flip_plane(dev, 0);
  1083. }
  1084. if (de_iir & DE_PLANEB_FLIP_DONE) {
  1085. intel_prepare_page_flip(dev, 1);
  1086. intel_finish_page_flip_plane(dev, 1);
  1087. }
  1088. /* check event from PCH */
  1089. if (de_iir & DE_PCH_EVENT) {
  1090. u32 pch_iir = I915_READ(SDEIIR);
  1091. if (HAS_PCH_CPT(dev))
  1092. cpt_irq_handler(dev, pch_iir);
  1093. else
  1094. ibx_irq_handler(dev, pch_iir);
/* should clear the PCH hotplug event before clearing the CPU irq */
  1096. I915_WRITE(SDEIIR, pch_iir);
  1097. }
  1098. if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
  1099. ironlake_handle_rps_change(dev);
  1100. if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
  1101. gen6_queue_rps_work(dev_priv, pm_iir);
  1102. I915_WRITE(GTIIR, gt_iir);
  1103. I915_WRITE(DEIIR, de_iir);
  1104. I915_WRITE(GEN6_PMIIR, pm_iir);
  1105. done:
  1106. I915_WRITE(DEIER, de_ier);
  1107. POSTING_READ(DEIER);
  1108. I915_WRITE(SDEIER, sde_ier);
  1109. POSTING_READ(SDEIER);
  1110. return ret;
  1111. }
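/*
* Note the common shape of the IRQ handlers above: master interrupt
* delivery is disabled first (DEIER, plus SDEIER for the south display
* engine), the IIR registers are sampled and their events handled, the
* handled bits are acked by writing them back, and only then is master
* delivery restored. If SDEIIR latched another event while SDEIER was
* zero, restoring SDEIER re-raises the interrupt, so no PCH event is
* lost.
*/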
  1112. /**
  1113. * i915_error_work_func - do process context error handling work
  1114. * @work: work struct
  1115. *
  1116. * Fire an error uevent so userspace can see that a hang or error
  1117. * was detected.
  1118. */
  1119. static void i915_error_work_func(struct work_struct *work)
  1120. {
  1121. struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
  1122. work);
  1123. drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
  1124. gpu_error);
  1125. struct drm_device *dev = dev_priv->dev;
  1126. struct intel_ring_buffer *ring;
  1127. char *error_event[] = { "ERROR=1", NULL };
  1128. char *reset_event[] = { "RESET=1", NULL };
  1129. char *reset_done_event[] = { "ERROR=0", NULL };
  1130. int i, ret;
  1131. kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
  1132. /*
  1133. * Note that there's only one work item which does gpu resets, so we
  1134. * need not worry about concurrent gpu resets potentially incrementing
  1135. * error->reset_counter twice. We only need to take care of another
  1136. * racing irq/hangcheck declaring the gpu dead for a second time. A
  1137. * quick check for that is good enough: schedule_work ensures the
  1138. * correct ordering between hang detection and this work item, and since
  1139. * the reset in-progress bit is only ever set by code outside of this
  1140. * work we don't need to worry about any other races.
  1141. */
  1142. if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
  1143. DRM_DEBUG_DRIVER("resetting chip\n");
  1144. kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
  1145. reset_event);
  1146. ret = i915_reset(dev);
  1147. if (ret == 0) {
  1148. /*
  1149. * After all the gem state is reset, increment the reset
  1150. * counter and wake up everyone waiting for the reset to
  1151. * complete.
  1152. *
* Since unlock operations are a one-sided barrier only, we need to
* insert a barrier here to order any seqno updates before the
* counter increment.
  1157. */
  1158. smp_mb__before_atomic_inc();
  1159. atomic_inc(&dev_priv->gpu_error.reset_counter);
  1160. kobject_uevent_env(&dev->primary->kdev.kobj,
  1161. KOBJ_CHANGE, reset_done_event);
  1162. } else {
  1163. atomic_set(&error->reset_counter, I915_WEDGED);
  1164. }
  1165. for_each_ring(ring, dev_priv, i)
  1166. wake_up_all(&ring->irq_queue);
  1167. intel_display_handle_reset(dev);
  1168. wake_up_all(&dev_priv->gpu_error.reset_queue);
  1169. }
  1170. }
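/*
* Resulting uevent sequence from the work func above: ERROR=1 when a
* hang or error is detected, RESET=1 just before the GPU reset is
* attempted, and ERROR=0 once the reset succeeded and reset_counter
* has been incremented. A failed reset marks the counter I915_WEDGED
* and sends no ERROR=0 event.
*/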
/* NB: instdone is zeroed first; note the memset below */
  1172. static void i915_get_extra_instdone(struct drm_device *dev,
  1173. uint32_t *instdone)
  1174. {
  1175. struct drm_i915_private *dev_priv = dev->dev_private;
  1176. memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
  1177. switch(INTEL_INFO(dev)->gen) {
  1178. case 2:
  1179. case 3:
  1180. instdone[0] = I915_READ(INSTDONE);
  1181. break;
  1182. case 4:
  1183. case 5:
  1184. case 6:
  1185. instdone[0] = I915_READ(INSTDONE_I965);
  1186. instdone[1] = I915_READ(INSTDONE1);
  1187. break;
default:
WARN_ONCE(1, "Unsupported platform\n");
/* fall through */
case 7:
  1191. instdone[0] = I915_READ(GEN7_INSTDONE_1);
  1192. instdone[1] = I915_READ(GEN7_SC_INSTDONE);
  1193. instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
  1194. instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
  1195. break;
  1196. }
  1197. }
  1198. #ifdef CONFIG_DEBUG_FS
  1199. static struct drm_i915_error_object *
  1200. i915_error_object_create_sized(struct drm_i915_private *dev_priv,
  1201. struct drm_i915_gem_object *src,
  1202. const int num_pages)
  1203. {
  1204. struct drm_i915_error_object *dst;
  1205. int i;
  1206. u32 reloc_offset;
  1207. if (src == NULL || src->pages == NULL)
  1208. return NULL;
  1209. dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
  1210. if (dst == NULL)
  1211. return NULL;
  1212. reloc_offset = src->gtt_offset;
  1213. for (i = 0; i < num_pages; i++) {
  1214. unsigned long flags;
  1215. void *d;
  1216. d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
  1217. if (d == NULL)
  1218. goto unwind;
  1219. local_irq_save(flags);
  1220. if (reloc_offset < dev_priv->gtt.mappable_end &&
  1221. src->has_global_gtt_mapping) {
  1222. void __iomem *s;
  1223. /* Simply ignore tiling or any overlapping fence.
  1224. * It's part of the error state, and this hopefully
  1225. * captures what the GPU read.
  1226. */
  1227. s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
  1228. reloc_offset);
  1229. memcpy_fromio(d, s, PAGE_SIZE);
  1230. io_mapping_unmap_atomic(s);
  1231. } else if (src->stolen) {
  1232. unsigned long offset;
  1233. offset = dev_priv->mm.stolen_base;
  1234. offset += src->stolen->start;
  1235. offset += i << PAGE_SHIFT;
  1236. memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
  1237. } else {
  1238. struct page *page;
  1239. void *s;
  1240. page = i915_gem_object_get_page(src, i);
  1241. drm_clflush_pages(&page, 1);
  1242. s = kmap_atomic(page);
  1243. memcpy(d, s, PAGE_SIZE);
  1244. kunmap_atomic(s);
  1245. drm_clflush_pages(&page, 1);
  1246. }
  1247. local_irq_restore(flags);
  1248. dst->pages[i] = d;
  1249. reloc_offset += PAGE_SIZE;
  1250. }
  1251. dst->page_count = num_pages;
  1252. dst->gtt_offset = src->gtt_offset;
  1253. return dst;
  1254. unwind:
  1255. while (i--)
  1256. kfree(dst->pages[i]);
  1257. kfree(dst);
  1258. return NULL;
  1259. }
  1260. #define i915_error_object_create(dev_priv, src) \
  1261. i915_error_object_create_sized((dev_priv), (src), \
  1262. (src)->base.size>>PAGE_SHIFT)
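/*
* Usage sketch for the wrapper above: snapshot every page of an object,
* e.g.
*
*	error->ring[i].ringbuffer =
*		i915_error_object_create(dev_priv, ring->obj);
*
* as done in i915_gem_record_rings() below.
*/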
  1263. static void
  1264. i915_error_object_free(struct drm_i915_error_object *obj)
  1265. {
  1266. int page;
  1267. if (obj == NULL)
  1268. return;
  1269. for (page = 0; page < obj->page_count; page++)
  1270. kfree(obj->pages[page]);
  1271. kfree(obj);
  1272. }
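/*
* Release callback for error->ref; invoked via
* kref_put(&error->ref, i915_error_state_free).
*/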
  1273. void
  1274. i915_error_state_free(struct kref *error_ref)
  1275. {
  1276. struct drm_i915_error_state *error = container_of(error_ref,
  1277. typeof(*error), ref);
  1278. int i;
  1279. for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
  1280. i915_error_object_free(error->ring[i].batchbuffer);
  1281. i915_error_object_free(error->ring[i].ringbuffer);
  1282. i915_error_object_free(error->ring[i].ctx);
  1283. kfree(error->ring[i].requests);
  1284. }
  1285. kfree(error->active_bo);
  1286. kfree(error->overlay);
  1287. kfree(error->display);
  1288. kfree(error);
  1289. }
  1290. static void capture_bo(struct drm_i915_error_buffer *err,
  1291. struct drm_i915_gem_object *obj)
  1292. {
  1293. err->size = obj->base.size;
  1294. err->name = obj->base.name;
  1295. err->rseqno = obj->last_read_seqno;
  1296. err->wseqno = obj->last_write_seqno;
  1297. err->gtt_offset = obj->gtt_offset;
  1298. err->read_domains = obj->base.read_domains;
  1299. err->write_domain = obj->base.write_domain;
  1300. err->fence_reg = obj->fence_reg;
  1301. err->pinned = 0;
  1302. if (obj->pin_count > 0)
  1303. err->pinned = 1;
  1304. if (obj->user_pin_count > 0)
  1305. err->pinned = -1;
  1306. err->tiling = obj->tiling_mode;
  1307. err->dirty = obj->dirty;
  1308. err->purgeable = obj->madv != I915_MADV_WILLNEED;
  1309. err->ring = obj->ring ? obj->ring->id : -1;
  1310. err->cache_level = obj->cache_level;
  1311. }
  1312. static u32 capture_active_bo(struct drm_i915_error_buffer *err,
  1313. int count, struct list_head *head)
  1314. {
  1315. struct drm_i915_gem_object *obj;
  1316. int i = 0;
  1317. list_for_each_entry(obj, head, mm_list) {
  1318. capture_bo(err++, obj);
  1319. if (++i == count)
  1320. break;
  1321. }
  1322. return i;
  1323. }
  1324. static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
  1325. int count, struct list_head *head)
  1326. {
  1327. struct drm_i915_gem_object *obj;
  1328. int i = 0;
  1329. list_for_each_entry(obj, head, gtt_list) {
  1330. if (obj->pin_count == 0)
  1331. continue;
  1332. capture_bo(err++, obj);
  1333. if (++i == count)
  1334. break;
  1335. }
  1336. return i;
  1337. }
  1338. static void i915_gem_record_fences(struct drm_device *dev,
  1339. struct drm_i915_error_state *error)
  1340. {
  1341. struct drm_i915_private *dev_priv = dev->dev_private;
  1342. int i;
  1343. /* Fences */
  1344. switch (INTEL_INFO(dev)->gen) {
  1345. case 7:
  1346. case 6:
  1347. for (i = 0; i < dev_priv->num_fence_regs; i++)
  1348. error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
  1349. break;
  1350. case 5:
  1351. case 4:
  1352. for (i = 0; i < 16; i++)
  1353. error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
  1354. break;
  1355. case 3:
  1356. if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  1357. for (i = 0; i < 8; i++)
error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
/* fall through */
case 2:
  1360. for (i = 0; i < 8; i++)
  1361. error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
  1362. break;
  1363. default:
  1364. BUG();
  1365. }
  1366. }
  1367. static struct drm_i915_error_object *
  1368. i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
  1369. struct intel_ring_buffer *ring)
  1370. {
  1371. struct drm_i915_gem_object *obj;
  1372. u32 seqno;
  1373. if (!ring->get_seqno)
  1374. return NULL;
  1375. if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
  1376. u32 acthd = I915_READ(ACTHD);
  1377. if (WARN_ON(ring->id != RCS))
  1378. return NULL;
  1379. obj = ring->private;
  1380. if (acthd >= obj->gtt_offset &&
  1381. acthd < obj->gtt_offset + obj->base.size)
  1382. return i915_error_object_create(dev_priv, obj);
  1383. }
  1384. seqno = ring->get_seqno(ring, false);
  1385. list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
  1386. if (obj->ring != ring)
  1387. continue;
  1388. if (i915_seqno_passed(seqno, obj->last_read_seqno))
  1389. continue;
  1390. if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
  1391. continue;
  1392. /* We need to copy these to an anonymous buffer as the simplest
  1393. * method to avoid being overwritten by userspace.
  1394. */
  1395. return i915_error_object_create(dev_priv, obj);
  1396. }
  1397. return NULL;
  1398. }
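/*
* The lookup above is a heuristic: lacking an exact record of what the
* GPU was executing, the first command-domain object on the active list
* that the hung ring has not yet retired (or, on broken-CS-TLB parts,
* the scratch batch that ACTHD points into) is taken to be the
* offending batchbuffer.
*/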
  1399. static void i915_record_ring_state(struct drm_device *dev,
  1400. struct drm_i915_error_state *error,
  1401. struct intel_ring_buffer *ring)
  1402. {
  1403. struct drm_i915_private *dev_priv = dev->dev_private;
  1404. if (INTEL_INFO(dev)->gen >= 6) {
  1405. error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
  1406. error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
  1407. error->semaphore_mboxes[ring->id][0]
  1408. = I915_READ(RING_SYNC_0(ring->mmio_base));
  1409. error->semaphore_mboxes[ring->id][1]
  1410. = I915_READ(RING_SYNC_1(ring->mmio_base));
  1411. error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
  1412. error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
  1413. }
  1414. if (INTEL_INFO(dev)->gen >= 4) {
  1415. error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
  1416. error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
  1417. error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
  1418. error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
  1419. error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
  1420. if (ring->id == RCS)
  1421. error->bbaddr = I915_READ64(BB_ADDR);
  1422. } else {
  1423. error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
  1424. error->ipeir[ring->id] = I915_READ(IPEIR);
  1425. error->ipehr[ring->id] = I915_READ(IPEHR);
  1426. error->instdone[ring->id] = I915_READ(INSTDONE);
  1427. }
  1428. error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
  1429. error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
  1430. error->seqno[ring->id] = ring->get_seqno(ring, false);
  1431. error->acthd[ring->id] = intel_ring_get_active_head(ring);
  1432. error->head[ring->id] = I915_READ_HEAD(ring);
  1433. error->tail[ring->id] = I915_READ_TAIL(ring);
  1434. error->ctl[ring->id] = I915_READ_CTL(ring);
  1435. error->cpu_ring_head[ring->id] = ring->head;
  1436. error->cpu_ring_tail[ring->id] = ring->tail;
  1437. }
  1438. static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
  1439. struct drm_i915_error_state *error,
  1440. struct drm_i915_error_ring *ering)
  1441. {
  1442. struct drm_i915_private *dev_priv = ring->dev->dev_private;
  1443. struct drm_i915_gem_object *obj;
  1444. /* Currently render ring is the only HW context user */
  1445. if (ring->id != RCS || !error->ccid)
  1446. return;
  1447. list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
  1448. if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
  1449. ering->ctx = i915_error_object_create_sized(dev_priv,
  1450. obj, 1);
  1451. }
  1452. }
  1453. }
  1454. static void i915_gem_record_rings(struct drm_device *dev,
  1455. struct drm_i915_error_state *error)
  1456. {
  1457. struct drm_i915_private *dev_priv = dev->dev_private;
  1458. struct intel_ring_buffer *ring;
  1459. struct drm_i915_gem_request *request;
  1460. int i, count;
  1461. for_each_ring(ring, dev_priv, i) {
  1462. i915_record_ring_state(dev, error, ring);
  1463. error->ring[i].batchbuffer =
  1464. i915_error_first_batchbuffer(dev_priv, ring);
  1465. error->ring[i].ringbuffer =
  1466. i915_error_object_create(dev_priv, ring->obj);
  1467. i915_gem_record_active_context(ring, error, &error->ring[i]);
  1468. count = 0;
  1469. list_for_each_entry(request, &ring->request_list, list)
  1470. count++;
  1471. error->ring[i].num_requests = count;
  1472. error->ring[i].requests =
  1473. kmalloc(count*sizeof(struct drm_i915_error_request),
  1474. GFP_ATOMIC);
  1475. if (error->ring[i].requests == NULL) {
  1476. error->ring[i].num_requests = 0;
  1477. continue;
  1478. }
  1479. count = 0;
  1480. list_for_each_entry(request, &ring->request_list, list) {
  1481. struct drm_i915_error_request *erq;
  1482. erq = &error->ring[i].requests[count++];
  1483. erq->seqno = request->seqno;
  1484. erq->jiffies = request->emitted_jiffies;
  1485. erq->tail = request->tail;
  1486. }
  1487. }
  1488. }
  1489. /**
  1490. * i915_capture_error_state - capture an error record for later analysis
  1491. * @dev: drm device
  1492. *
  1493. * Should be called when an error is detected (either a hang or an error
  1494. * interrupt) to capture error state from the time of the error. Fills
  1495. * out a structure which becomes available in debugfs for user level tools
  1496. * to pick up.
  1497. */
  1498. static void i915_capture_error_state(struct drm_device *dev)
  1499. {
  1500. struct drm_i915_private *dev_priv = dev->dev_private;
  1501. struct drm_i915_gem_object *obj;
  1502. struct drm_i915_error_state *error;
  1503. unsigned long flags;
  1504. int i, pipe;
  1505. spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
  1506. error = dev_priv->gpu_error.first_error;
  1507. spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
  1508. if (error)
  1509. return;
  1510. /* Account for pipe specific data like PIPE*STAT */
  1511. error = kzalloc(sizeof(*error), GFP_ATOMIC);
  1512. if (!error) {
  1513. DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
  1514. return;
  1515. }
  1516. DRM_INFO("capturing error event; look for more information in "
  1517. "/sys/kernel/debug/dri/%d/i915_error_state\n",
  1518. dev->primary->index);
  1519. kref_init(&error->ref);
  1520. error->eir = I915_READ(EIR);
  1521. error->pgtbl_er = I915_READ(PGTBL_ER);
  1522. if (HAS_HW_CONTEXTS(dev))
  1523. error->ccid = I915_READ(CCID);
  1524. if (HAS_PCH_SPLIT(dev))
  1525. error->ier = I915_READ(DEIER) | I915_READ(GTIER);
  1526. else if (IS_VALLEYVIEW(dev))
  1527. error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
  1528. else if (IS_GEN2(dev))
  1529. error->ier = I915_READ16(IER);
  1530. else
  1531. error->ier = I915_READ(IER);
  1532. if (INTEL_INFO(dev)->gen >= 6)
  1533. error->derrmr = I915_READ(DERRMR);
  1534. if (IS_VALLEYVIEW(dev))
  1535. error->forcewake = I915_READ(FORCEWAKE_VLV);
  1536. else if (INTEL_INFO(dev)->gen >= 7)
  1537. error->forcewake = I915_READ(FORCEWAKE_MT);
  1538. else if (INTEL_INFO(dev)->gen == 6)
  1539. error->forcewake = I915_READ(FORCEWAKE);
  1540. if (!HAS_PCH_SPLIT(dev))
  1541. for_each_pipe(pipe)
  1542. error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
  1543. if (INTEL_INFO(dev)->gen >= 6) {
  1544. error->error = I915_READ(ERROR_GEN6);
  1545. error->done_reg = I915_READ(DONE_REG);
  1546. }
  1547. if (INTEL_INFO(dev)->gen == 7)
  1548. error->err_int = I915_READ(GEN7_ERR_INT);
  1549. i915_get_extra_instdone(dev, error->extra_instdone);
  1550. i915_gem_record_fences(dev, error);
  1551. i915_gem_record_rings(dev, error);
  1552. /* Record buffers on the active and pinned lists. */
  1553. error->active_bo = NULL;
  1554. error->pinned_bo = NULL;
  1555. i = 0;
  1556. list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
  1557. i++;
  1558. error->active_bo_count = i;
  1559. list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
  1560. if (obj->pin_count)
  1561. i++;
  1562. error->pinned_bo_count = i - error->active_bo_count;
  1563. error->active_bo = NULL;
  1564. error->pinned_bo = NULL;
  1565. if (i) {
  1566. error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
  1567. GFP_ATOMIC);
  1568. if (error->active_bo)
  1569. error->pinned_bo =
  1570. error->active_bo + error->active_bo_count;
  1571. }
  1572. if (error->active_bo)
  1573. error->active_bo_count =
  1574. capture_active_bo(error->active_bo,
  1575. error->active_bo_count,
  1576. &dev_priv->mm.active_list);
  1577. if (error->pinned_bo)
  1578. error->pinned_bo_count =
  1579. capture_pinned_bo(error->pinned_bo,
  1580. error->pinned_bo_count,
  1581. &dev_priv->mm.bound_list);
  1582. do_gettimeofday(&error->time);
  1583. error->overlay = intel_overlay_capture_error_state(dev);
  1584. error->display = intel_display_capture_error_state(dev);
  1585. spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
  1586. if (dev_priv->gpu_error.first_error == NULL) {
  1587. dev_priv->gpu_error.first_error = error;
  1588. error = NULL;
  1589. }
  1590. spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
  1591. if (error)
  1592. i915_error_state_free(&error->ref);
  1593. }
  1594. void i915_destroy_error_state(struct drm_device *dev)
  1595. {
  1596. struct drm_i915_private *dev_priv = dev->dev_private;
  1597. struct drm_i915_error_state *error;
  1598. unsigned long flags;
  1599. spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
  1600. error = dev_priv->gpu_error.first_error;
  1601. dev_priv->gpu_error.first_error = NULL;
  1602. spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
  1603. if (error)
  1604. kref_put(&error->ref, i915_error_state_free);
  1605. }
  1606. #else
  1607. #define i915_capture_error_state(x)
  1608. #endif
  1609. static void i915_report_and_clear_eir(struct drm_device *dev)
  1610. {
  1611. struct drm_i915_private *dev_priv = dev->dev_private;
  1612. uint32_t instdone[I915_NUM_INSTDONE_REG];
  1613. u32 eir = I915_READ(EIR);
  1614. int pipe, i;
  1615. if (!eir)
  1616. return;
  1617. pr_err("render error detected, EIR: 0x%08x\n", eir);
  1618. i915_get_extra_instdone(dev, instdone);
  1619. if (IS_G4X(dev)) {
  1620. if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
  1621. u32 ipeir = I915_READ(IPEIR_I965);
  1622. pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  1623. pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  1624. for (i = 0; i < ARRAY_SIZE(instdone); i++)
  1625. pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  1626. pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
  1627. pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  1628. I915_WRITE(IPEIR_I965, ipeir);
  1629. POSTING_READ(IPEIR_I965);
  1630. }
  1631. if (eir & GM45_ERROR_PAGE_TABLE) {
  1632. u32 pgtbl_err = I915_READ(PGTBL_ER);
  1633. pr_err("page table error\n");
  1634. pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
  1635. I915_WRITE(PGTBL_ER, pgtbl_err);
  1636. POSTING_READ(PGTBL_ER);
  1637. }
  1638. }
  1639. if (!IS_GEN2(dev)) {
  1640. if (eir & I915_ERROR_PAGE_TABLE) {
  1641. u32 pgtbl_err = I915_READ(PGTBL_ER);
  1642. pr_err("page table error\n");
  1643. pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
  1644. I915_WRITE(PGTBL_ER, pgtbl_err);
  1645. POSTING_READ(PGTBL_ER);
  1646. }
  1647. }
  1648. if (eir & I915_ERROR_MEMORY_REFRESH) {
  1649. pr_err("memory refresh error:\n");
  1650. for_each_pipe(pipe)
  1651. pr_err("pipe %c stat: 0x%08x\n",
  1652. pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
  1653. /* pipestat has already been acked */
  1654. }
  1655. if (eir & I915_ERROR_INSTRUCTION) {
  1656. pr_err("instruction error\n");
  1657. pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
  1658. for (i = 0; i < ARRAY_SIZE(instdone); i++)
  1659. pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  1660. if (INTEL_INFO(dev)->gen < 4) {
  1661. u32 ipeir = I915_READ(IPEIR);
  1662. pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
  1663. pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
  1664. pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
  1665. I915_WRITE(IPEIR, ipeir);
  1666. POSTING_READ(IPEIR);
  1667. } else {
  1668. u32 ipeir = I915_READ(IPEIR_I965);
  1669. pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  1670. pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  1671. pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
  1672. pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  1673. I915_WRITE(IPEIR_I965, ipeir);
  1674. POSTING_READ(IPEIR_I965);
  1675. }
  1676. }
  1677. I915_WRITE(EIR, eir);
  1678. POSTING_READ(EIR);
  1679. eir = I915_READ(EIR);
  1680. if (eir) {
/*
* Some errors might have become stuck; mask them.
*/
  1685. DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
  1686. I915_WRITE(EMR, I915_READ(EMR) | eir);
  1687. I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  1688. }
  1689. }
  1690. /**
  1691. * i915_handle_error - handle an error interrupt
  1692. * @dev: drm device
  1693. *
* Do some basic checking of register state at error interrupt time and
  1695. * dump it to the syslog. Also call i915_capture_error_state() to make
  1696. * sure we get a record and make it available in debugfs. Fire a uevent
  1697. * so userspace knows something bad happened (should trigger collection
  1698. * of a ring dump etc.).
  1699. */
  1700. void i915_handle_error(struct drm_device *dev, bool wedged)
  1701. {
  1702. struct drm_i915_private *dev_priv = dev->dev_private;
  1703. struct intel_ring_buffer *ring;
  1704. int i;
  1705. i915_capture_error_state(dev);
  1706. i915_report_and_clear_eir(dev);
  1707. if (wedged) {
  1708. atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
  1709. &dev_priv->gpu_error.reset_counter);
  1710. /*
  1711. * Wakeup waiting processes so that the reset work item
  1712. * doesn't deadlock trying to grab various locks.
  1713. */
  1714. for_each_ring(ring, dev_priv, i)
  1715. wake_up_all(&ring->irq_queue);
  1716. }
  1717. queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
  1718. }
  1719. static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
  1720. {
  1721. drm_i915_private_t *dev_priv = dev->dev_private;
  1722. struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  1723. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  1724. struct drm_i915_gem_object *obj;
  1725. struct intel_unpin_work *work;
  1726. unsigned long flags;
  1727. bool stall_detected;
  1728. /* Ignore early vblank irqs */
  1729. if (intel_crtc == NULL)
  1730. return;
  1731. spin_lock_irqsave(&dev->event_lock, flags);
  1732. work = intel_crtc->unpin_work;
  1733. if (work == NULL ||
  1734. atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
  1735. !work->enable_stall_check) {
  1736. /* Either the pending flip IRQ arrived, or we're too early. Don't check */
  1737. spin_unlock_irqrestore(&dev->event_lock, flags);
  1738. return;
  1739. }
  1740. /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
  1741. obj = work->pending_flip_obj;
  1742. if (INTEL_INFO(dev)->gen >= 4) {
  1743. int dspsurf = DSPSURF(intel_crtc->plane);
  1744. stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
  1745. obj->gtt_offset;
  1746. } else {
  1747. int dspaddr = DSPADDR(intel_crtc->plane);
  1748. stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
  1749. crtc->y * crtc->fb->pitches[0] +
  1750. crtc->x * crtc->fb->bits_per_pixel/8);
  1751. }
  1752. spin_unlock_irqrestore(&dev->event_lock, flags);
  1753. if (stall_detected) {
  1754. DRM_DEBUG_DRIVER("Pageflip stall detected\n");
  1755. intel_prepare_page_flip(dev, intel_crtc->plane);
  1756. }
  1757. }
  1758. /* Called from drm generic code, passed 'crtc' which
  1759. * we use as a pipe index
  1760. */
  1761. static int i915_enable_vblank(struct drm_device *dev, int pipe)
  1762. {
  1763. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1764. unsigned long irqflags;
  1765. if (!i915_pipe_enabled(dev, pipe))
  1766. return -EINVAL;
  1767. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1768. if (INTEL_INFO(dev)->gen >= 4)
  1769. i915_enable_pipestat(dev_priv, pipe,
  1770. PIPE_START_VBLANK_INTERRUPT_ENABLE);
  1771. else
  1772. i915_enable_pipestat(dev_priv, pipe,
  1773. PIPE_VBLANK_INTERRUPT_ENABLE);
  1774. /* maintain vblank delivery even in deep C-states */
  1775. if (dev_priv->info->gen == 3)
  1776. I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
  1777. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1778. return 0;
  1779. }
  1780. static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
  1781. {
  1782. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1783. unsigned long irqflags;
  1784. if (!i915_pipe_enabled(dev, pipe))
  1785. return -EINVAL;
  1786. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1787. ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
  1788. DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
  1789. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1790. return 0;
  1791. }
  1792. static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
  1793. {
  1794. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1795. unsigned long irqflags;
  1796. if (!i915_pipe_enabled(dev, pipe))
  1797. return -EINVAL;
  1798. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1799. ironlake_enable_display_irq(dev_priv,
  1800. DE_PIPEA_VBLANK_IVB << (5 * pipe));
  1801. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1802. return 0;
  1803. }
  1804. static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
  1805. {
  1806. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1807. unsigned long irqflags;
  1808. u32 imr;
  1809. if (!i915_pipe_enabled(dev, pipe))
  1810. return -EINVAL;
  1811. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1812. imr = I915_READ(VLV_IMR);
  1813. if (pipe == 0)
  1814. imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
  1815. else
  1816. imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
  1817. I915_WRITE(VLV_IMR, imr);
  1818. i915_enable_pipestat(dev_priv, pipe,
  1819. PIPE_START_VBLANK_INTERRUPT_ENABLE);
  1820. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1821. return 0;
  1822. }
  1823. /* Called from drm generic code, passed 'crtc' which
  1824. * we use as a pipe index
  1825. */
  1826. static void i915_disable_vblank(struct drm_device *dev, int pipe)
  1827. {
  1828. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1829. unsigned long irqflags;
  1830. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1831. if (dev_priv->info->gen == 3)
  1832. I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
  1833. i915_disable_pipestat(dev_priv, pipe,
  1834. PIPE_VBLANK_INTERRUPT_ENABLE |
  1835. PIPE_START_VBLANK_INTERRUPT_ENABLE);
  1836. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1837. }
  1838. static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
  1839. {
  1840. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1841. unsigned long irqflags;
  1842. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1843. ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
  1844. DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
  1845. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1846. }
  1847. static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
  1848. {
  1849. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1850. unsigned long irqflags;
  1851. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1852. ironlake_disable_display_irq(dev_priv,
  1853. DE_PIPEA_VBLANK_IVB << (pipe * 5));
  1854. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1855. }
  1856. static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
  1857. {
  1858. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1859. unsigned long irqflags;
  1860. u32 imr;
  1861. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1862. i915_disable_pipestat(dev_priv, pipe,
  1863. PIPE_START_VBLANK_INTERRUPT_ENABLE);
  1864. imr = I915_READ(VLV_IMR);
  1865. if (pipe == 0)
  1866. imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
  1867. else
  1868. imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
  1869. I915_WRITE(VLV_IMR, imr);
  1870. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1871. }
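/*
* Hangcheck helpers: ring_last_seqno() returns the seqno of the most
* recently emitted request (the tail of ring->request_list); a ring is
* treated as idle once its current seqno has passed that value.
*/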
  1872. static u32
  1873. ring_last_seqno(struct intel_ring_buffer *ring)
  1874. {
  1875. return list_entry(ring->request_list.prev,
  1876. struct drm_i915_gem_request, list)->seqno;
  1877. }
  1878. static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring,
  1879. u32 ring_seqno, bool *err)
  1880. {
  1881. if (list_empty(&ring->request_list) ||
  1882. i915_seqno_passed(ring_seqno, ring_last_seqno(ring))) {
  1883. /* Issue a wake-up to catch stuck h/w. */
  1884. if (waitqueue_active(&ring->irq_queue)) {
  1885. DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
  1886. ring->name);
  1887. wake_up_all(&ring->irq_queue);
  1888. *err = true;
  1889. }
  1890. return true;
  1891. }
  1892. return false;
  1893. }
  1894. static bool semaphore_passed(struct intel_ring_buffer *ring)
  1895. {
  1896. struct drm_i915_private *dev_priv = ring->dev->dev_private;
  1897. u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
  1898. struct intel_ring_buffer *signaller;
  1899. u32 cmd, ipehr, acthd_min;
  1900. ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
  1901. if ((ipehr & ~(0x3 << 16)) !=
  1902. (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
  1903. return false;
  1904. /* ACTHD is likely pointing to the dword after the actual command,
  1905. * so scan backwards until we find the MBOX.
  1906. */
  1907. acthd_min = max((int)acthd - 3 * 4, 0);
  1908. do {
  1909. cmd = ioread32(ring->virtual_start + acthd);
  1910. if (cmd == ipehr)
  1911. break;
  1912. acthd -= 4;
  1913. if (acthd < acthd_min)
  1914. return false;
  1915. } while (1);
  1916. signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
  1917. return i915_seqno_passed(signaller->get_seqno(signaller, false),
  1918. ioread32(ring->virtual_start+acthd+4)+1);
  1919. }
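/*
* In semaphore_passed() above, bit 17 of the saved MI_SEMAPHORE_MBOX
* header (one of the two bits masked out of the IPEHR comparison) is
* used to pick the signalling ring, and the dword following the command
* is the seqno being waited on; hence the ioread32(... + 4) + 1
* comparison against the signaller's current seqno.
*/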
  1920. static bool kick_ring(struct intel_ring_buffer *ring)
  1921. {
  1922. struct drm_device *dev = ring->dev;
  1923. struct drm_i915_private *dev_priv = dev->dev_private;
  1924. u32 tmp = I915_READ_CTL(ring);
  1925. if (tmp & RING_WAIT) {
  1926. DRM_ERROR("Kicking stuck wait on %s\n",
  1927. ring->name);
  1928. I915_WRITE_CTL(ring, tmp);
  1929. return true;
  1930. }
  1931. if (INTEL_INFO(dev)->gen >= 6 &&
  1932. tmp & RING_WAIT_SEMAPHORE &&
  1933. semaphore_passed(ring)) {
  1934. DRM_ERROR("Kicking stuck semaphore on %s\n",
  1935. ring->name);
  1936. I915_WRITE_CTL(ring, tmp);
  1937. return true;
  1938. }
  1939. return false;
  1940. }
  1941. static bool i915_hangcheck_ring_hung(struct intel_ring_buffer *ring)
  1942. {
  1943. if (IS_GEN2(ring->dev))
  1944. return false;
  1945. /* Is the chip hanging on a WAIT_FOR_EVENT?
  1946. * If so we can simply poke the RB_WAIT bit
  1947. * and break the hang. This should work on
  1948. * all but the second generation chipsets.
  1949. */
  1950. return !kick_ring(ring);
  1951. }
  1952. static bool i915_hangcheck_hung(struct drm_device *dev)
  1953. {
  1954. drm_i915_private_t *dev_priv = dev->dev_private;
  1955. if (dev_priv->gpu_error.hangcheck_count++ > 1) {
  1956. bool hung = true;
  1957. struct intel_ring_buffer *ring;
  1958. int i;
  1959. DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
  1960. i915_handle_error(dev, true);
  1961. for_each_ring(ring, dev_priv, i)
  1962. hung &= i915_hangcheck_ring_hung(ring);
  1963. return hung;
  1964. }
  1965. return false;
  1966. }
/**
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. The first time this is called we simply
* record the per-ring seqnos. If they haven't advanced by the time the
* hangcheck timer elapses again, we assume the chip is wedged and try
* to fix it.
*/
  1973. void i915_hangcheck_elapsed(unsigned long data)
  1974. {
  1975. struct drm_device *dev = (struct drm_device *)data;
  1976. drm_i915_private_t *dev_priv = dev->dev_private;
  1977. struct intel_ring_buffer *ring;
  1978. bool err = false, idle;
  1979. int i;
  1980. u32 seqno[I915_NUM_RINGS];
  1981. bool work_done;
  1982. if (!i915_enable_hangcheck)
  1983. return;
  1984. idle = true;
  1985. for_each_ring(ring, dev_priv, i) {
  1986. seqno[i] = ring->get_seqno(ring, false);
  1987. idle &= i915_hangcheck_ring_idle(ring, seqno[i], &err);
  1988. }
  1989. /* If all work is done then ACTHD clearly hasn't advanced. */
  1990. if (idle) {
  1991. if (err) {
  1992. if (i915_hangcheck_hung(dev))
  1993. return;
  1994. goto repeat;
  1995. }
  1996. dev_priv->gpu_error.hangcheck_count = 0;
  1997. return;
  1998. }
  1999. work_done = false;
  2000. for_each_ring(ring, dev_priv, i) {
  2001. if (ring->hangcheck.seqno != seqno[i]) {
  2002. work_done = true;
  2003. ring->hangcheck.seqno = seqno[i];
  2004. }
  2005. }
  2006. if (!work_done) {
  2007. if (i915_hangcheck_hung(dev))
  2008. return;
  2009. } else {
  2010. dev_priv->gpu_error.hangcheck_count = 0;
  2011. }
  2012. repeat:
/* Reset timer in case the chip hangs without another request being added */
  2014. mod_timer(&dev_priv->gpu_error.hangcheck_timer,
  2015. round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
  2016. }
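/*
* Hangcheck policy in summary: every DRM_I915_HANGCHECK_JIFFIES the
* per-ring seqnos are sampled; any forward progress resets
* hangcheck_count, while repeated samples without progress escalate to
* i915_hangcheck_hung(), which declares the GPU hung on the second
* consecutive strike, fires the error/reset path, and also tries to
* kick stuck WAIT_FOR_EVENT/semaphore waits so blocked waiters can make
* progress.
*/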
  2017. /* drm_dma.h hooks
  2018. */
  2019. static void ironlake_irq_preinstall(struct drm_device *dev)
  2020. {
  2021. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2022. atomic_set(&dev_priv->irq_received, 0);
  2023. I915_WRITE(HWSTAM, 0xeffe);
  2024. /* XXX hotplug from PCH */
  2025. I915_WRITE(DEIMR, 0xffffffff);
  2026. I915_WRITE(DEIER, 0x0);
  2027. POSTING_READ(DEIER);
  2028. /* and GT */
  2029. I915_WRITE(GTIMR, 0xffffffff);
  2030. I915_WRITE(GTIER, 0x0);
  2031. POSTING_READ(GTIER);
  2032. if (HAS_PCH_NOP(dev))
  2033. return;
  2034. /* south display irq */
  2035. I915_WRITE(SDEIMR, 0xffffffff);
  2036. /*
  2037. * SDEIER is also touched by the interrupt handler to work around missed
  2038. * PCH interrupts. Hence we can't update it after the interrupt handler
  2039. * is enabled - instead we unconditionally enable all PCH interrupt
  2040. * sources here, but then only unmask them as needed with SDEIMR.
  2041. */
  2042. I915_WRITE(SDEIER, 0xffffffff);
  2043. POSTING_READ(SDEIER);
  2044. }
  2045. static void valleyview_irq_preinstall(struct drm_device *dev)
  2046. {
  2047. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2048. int pipe;
  2049. atomic_set(&dev_priv->irq_received, 0);
  2050. /* VLV magic */
  2051. I915_WRITE(VLV_IMR, 0);
  2052. I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
  2053. I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
  2054. I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
  2055. /* and GT */
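/*
* GTIIR is deliberately written twice below; IIR registers can latch a
* second pending event behind the one being cleared, so the repeated
* write (presumably) flushes anything re-queued after the first clear.
*/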
  2056. I915_WRITE(GTIIR, I915_READ(GTIIR));
  2057. I915_WRITE(GTIIR, I915_READ(GTIIR));
  2058. I915_WRITE(GTIMR, 0xffffffff);
  2059. I915_WRITE(GTIER, 0x0);
  2060. POSTING_READ(GTIER);
  2061. I915_WRITE(DPINVGTT, 0xff);
  2062. I915_WRITE(PORT_HOTPLUG_EN, 0);
  2063. I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2064. for_each_pipe(pipe)
  2065. I915_WRITE(PIPESTAT(pipe), 0xffff);
  2066. I915_WRITE(VLV_IIR, 0xffffffff);
  2067. I915_WRITE(VLV_IMR, 0xffffffff);
  2068. I915_WRITE(VLV_IER, 0x0);
  2069. POSTING_READ(VLV_IER);
  2070. }
  2071. static void ibx_hpd_irq_setup(struct drm_device *dev)
  2072. {
  2073. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2074. struct drm_mode_config *mode_config = &dev->mode_config;
  2075. struct intel_encoder *intel_encoder;
  2076. u32 mask = ~I915_READ(SDEIMR);
  2077. u32 hotplug;
  2078. if (HAS_PCH_IBX(dev)) {
  2079. mask &= ~SDE_HOTPLUG_MASK;
  2080. list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
  2081. if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  2082. mask |= hpd_ibx[intel_encoder->hpd_pin];
  2083. } else {
  2084. mask &= ~SDE_HOTPLUG_MASK_CPT;
  2085. list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
  2086. if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  2087. mask |= hpd_cpt[intel_encoder->hpd_pin];
  2088. }
  2089. I915_WRITE(SDEIMR, ~mask);
  2090. /*
  2091. * Enable digital hotplug on the PCH, and configure the DP short pulse
  2092. * duration to 2ms (which is the minimum in the Display Port spec)
  2093. *
  2094. * This register is the same on all known PCH chips.
  2095. */
  2096. hotplug = I915_READ(PCH_PORT_HOTPLUG);
  2097. hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
  2098. hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
  2099. hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
  2100. hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
  2101. I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  2102. }
  2103. static void ibx_irq_postinstall(struct drm_device *dev)
  2104. {
  2105. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2106. u32 mask;
  2107. if (HAS_PCH_IBX(dev)) {
  2108. mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
  2109. SDE_TRANSA_FIFO_UNDER | SDE_POISON;
  2110. } else {
  2111. mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
  2112. I915_WRITE(SERR_INT, I915_READ(SERR_INT));
  2113. }
  2114. if (HAS_PCH_NOP(dev))
  2115. return;
  2116. I915_WRITE(SDEIIR, I915_READ(SDEIIR));
  2117. I915_WRITE(SDEIMR, ~mask);
  2118. }
  2119. static int ironlake_irq_postinstall(struct drm_device *dev)
  2120. {
  2121. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable the kinds of interrupts that are always enabled */
  2123. u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
  2124. DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
  2125. DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
  2126. DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
  2127. u32 render_irqs;
  2128. dev_priv->irq_mask = ~display_mask;
/* should always be able to generate irqs */
  2130. I915_WRITE(DEIIR, I915_READ(DEIIR));
  2131. I915_WRITE(DEIMR, dev_priv->irq_mask);
  2132. I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
  2133. POSTING_READ(DEIER);
  2134. dev_priv->gt_irq_mask = ~0;
  2135. I915_WRITE(GTIIR, I915_READ(GTIIR));
  2136. I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  2137. if (IS_GEN6(dev))
  2138. render_irqs =
  2139. GT_USER_INTERRUPT |
  2140. GEN6_BSD_USER_INTERRUPT |
  2141. GEN6_BLITTER_USER_INTERRUPT;
  2142. else
  2143. render_irqs =
  2144. GT_USER_INTERRUPT |
  2145. GT_PIPE_NOTIFY |
  2146. GT_BSD_USER_INTERRUPT;
  2147. I915_WRITE(GTIER, render_irqs);
  2148. POSTING_READ(GTIER);
  2149. ibx_irq_postinstall(dev);
  2150. if (IS_IRONLAKE_M(dev)) {
  2151. /* Clear & enable PCU event interrupts */
  2152. I915_WRITE(DEIIR, DE_PCU_EVENT);
  2153. I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
  2154. ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
  2155. }
  2156. return 0;
  2157. }
  2158. static int ivybridge_irq_postinstall(struct drm_device *dev)
  2159. {
  2160. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable the kinds of interrupts that are always enabled */
  2162. u32 display_mask =
  2163. DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
  2164. DE_PLANEC_FLIP_DONE_IVB |
  2165. DE_PLANEB_FLIP_DONE_IVB |
  2166. DE_PLANEA_FLIP_DONE_IVB |
  2167. DE_AUX_CHANNEL_A_IVB |
  2168. DE_ERR_INT_IVB;
  2169. u32 render_irqs;
  2170. dev_priv->irq_mask = ~display_mask;
/* should always be able to generate irqs */
  2172. I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
  2173. I915_WRITE(DEIIR, I915_READ(DEIIR));
  2174. I915_WRITE(DEIMR, dev_priv->irq_mask);
  2175. I915_WRITE(DEIER,
  2176. display_mask |
  2177. DE_PIPEC_VBLANK_IVB |
  2178. DE_PIPEB_VBLANK_IVB |
  2179. DE_PIPEA_VBLANK_IVB);
  2180. POSTING_READ(DEIER);
  2181. dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
  2182. I915_WRITE(GTIIR, I915_READ(GTIIR));
  2183. I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  2184. render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
  2185. GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
  2186. I915_WRITE(GTIER, render_irqs);
  2187. POSTING_READ(GTIER);
  2188. ibx_irq_postinstall(dev);
  2189. return 0;
  2190. }
  2191. static int valleyview_irq_postinstall(struct drm_device *dev)
  2192. {
  2193. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2194. u32 enable_mask;
  2195. u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
  2196. u32 render_irqs;
  2197. enable_mask = I915_DISPLAY_PORT_INTERRUPT;
  2198. enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2199. I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
  2200. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2201. I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
/*
* Leave vblank interrupts masked initially; the enable/disable hooks
* will toggle them based on usage.
*/
  2206. dev_priv->irq_mask = (~enable_mask) |
  2207. I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
  2208. I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
  2209. I915_WRITE(PORT_HOTPLUG_EN, 0);
  2210. POSTING_READ(PORT_HOTPLUG_EN);
  2211. I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  2212. I915_WRITE(VLV_IER, enable_mask);
  2213. I915_WRITE(VLV_IIR, 0xffffffff);
  2214. I915_WRITE(PIPESTAT(0), 0xffff);
  2215. I915_WRITE(PIPESTAT(1), 0xffff);
  2216. POSTING_READ(VLV_IER);
  2217. i915_enable_pipestat(dev_priv, 0, pipestat_enable);
  2218. i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
  2219. i915_enable_pipestat(dev_priv, 1, pipestat_enable);
  2220. I915_WRITE(VLV_IIR, 0xffffffff);
  2221. I915_WRITE(VLV_IIR, 0xffffffff);
  2222. I915_WRITE(GTIIR, I915_READ(GTIIR));
  2223. I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  2224. render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
  2225. GEN6_BLITTER_USER_INTERRUPT;
  2226. I915_WRITE(GTIER, render_irqs);
  2227. POSTING_READ(GTIER);
  2228. /* ack & enable invalid PTE error interrupts */
  2229. #if 0 /* FIXME: add support to irq handler for checking these bits */
  2230. I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
  2231. I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
  2232. #endif
  2233. I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
  2234. return 0;
  2235. }
  2236. static void valleyview_irq_uninstall(struct drm_device *dev)
  2237. {
  2238. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2239. int pipe;
  2240. if (!dev_priv)
  2241. return;
  2242. del_timer_sync(&dev_priv->hotplug_reenable_timer);
  2243. for_each_pipe(pipe)
  2244. I915_WRITE(PIPESTAT(pipe), 0xffff);
  2245. I915_WRITE(HWSTAM, 0xffffffff);
  2246. I915_WRITE(PORT_HOTPLUG_EN, 0);
  2247. I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2248. for_each_pipe(pipe)
  2249. I915_WRITE(PIPESTAT(pipe), 0xffff);
  2250. I915_WRITE(VLV_IIR, 0xffffffff);
  2251. I915_WRITE(VLV_IMR, 0xffffffff);
  2252. I915_WRITE(VLV_IER, 0x0);
  2253. POSTING_READ(VLV_IER);
  2254. }
  2255. static void ironlake_irq_uninstall(struct drm_device *dev)
  2256. {
  2257. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2258. if (!dev_priv)
  2259. return;
  2260. del_timer_sync(&dev_priv->hotplug_reenable_timer);
  2261. I915_WRITE(HWSTAM, 0xffffffff);
  2262. I915_WRITE(DEIMR, 0xffffffff);
  2263. I915_WRITE(DEIER, 0x0);
  2264. I915_WRITE(DEIIR, I915_READ(DEIIR));
  2265. if (IS_GEN7(dev))
  2266. I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
  2267. I915_WRITE(GTIMR, 0xffffffff);
  2268. I915_WRITE(GTIER, 0x0);
  2269. I915_WRITE(GTIIR, I915_READ(GTIIR));
  2270. if (HAS_PCH_NOP(dev))
  2271. return;
  2272. I915_WRITE(SDEIMR, 0xffffffff);
  2273. I915_WRITE(SDEIER, 0x0);
  2274. I915_WRITE(SDEIIR, I915_READ(SDEIIR));
  2275. if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
  2276. I915_WRITE(SERR_INT, I915_READ(SERR_INT));
  2277. }
  2278. static void i8xx_irq_preinstall(struct drm_device * dev)
  2279. {
  2280. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2281. int pipe;
  2282. atomic_set(&dev_priv->irq_received, 0);
  2283. for_each_pipe(pipe)
  2284. I915_WRITE(PIPESTAT(pipe), 0);
  2285. I915_WRITE16(IMR, 0xffff);
  2286. I915_WRITE16(IER, 0x0);
  2287. POSTING_READ16(IER);
  2288. }
  2289. static int i8xx_irq_postinstall(struct drm_device *dev)
  2290. {
  2291. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2292. I915_WRITE16(EMR,
  2293. ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  2294. /* Unmask the interrupts that we always want on. */
  2295. dev_priv->irq_mask =
  2296. ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2297. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2298. I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2299. I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  2300. I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2301. I915_WRITE16(IMR, dev_priv->irq_mask);
  2302. I915_WRITE16(IER,
  2303. I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2304. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2305. I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  2306. I915_USER_INTERRUPT);
  2307. POSTING_READ16(IER);
  2308. return 0;
  2309. }
  2310. /*
  2311. * Returns true when a page flip has completed.
  2312. */
  2313. static bool i8xx_handle_vblank(struct drm_device *dev,
  2314. int pipe, u16 iir)
  2315. {
  2316. drm_i915_private_t *dev_priv = dev->dev_private;
  2317. u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
  2318. if (!drm_handle_vblank(dev, pipe))
  2319. return false;
  2320. if ((iir & flip_pending) == 0)
  2321. return false;
  2322. intel_prepare_page_flip(dev, pipe);
  2323. /* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the PendingFlip
  2325. * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  2326. * the flip is completed (no longer pending). Since this doesn't raise
  2327. * an interrupt per se, we watch for the change at vblank.
  2328. */
  2329. if (I915_READ16(ISR) & flip_pending)
  2330. return false;
  2331. intel_finish_page_flip(dev, pipe);
  2332. return true;
  2333. }
  2334. static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  2335. {
  2336. struct drm_device *dev = (struct drm_device *) arg;
  2337. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2338. u16 iir, new_iir;
  2339. u32 pipe_stats[2];
  2340. unsigned long irqflags;
  2341. int irq_received;
  2342. int pipe;
  2343. u16 flip_mask =
  2344. I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2345. I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  2346. atomic_inc(&dev_priv->irq_received);
  2347. iir = I915_READ16(IIR);
  2348. if (iir == 0)
  2349. return IRQ_NONE;
  2350. while (iir & ~flip_mask) {
  2351. /* Can't rely on pipestat interrupt bit in iir as it might
  2352. * have been cleared after the pipestat interrupt was received.
  2353. * It doesn't set the bit in iir again, but it still produces
  2354. * interrupts (for non-MSI).
  2355. */
  2356. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2357. if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  2358. i915_handle_error(dev, false);
  2359. for_each_pipe(pipe) {
  2360. int reg = PIPESTAT(pipe);
  2361. pipe_stats[pipe] = I915_READ(reg);
  2362. /*
  2363. * Clear the PIPE*STAT regs before the IIR
  2364. */
  2365. if (pipe_stats[pipe] & 0x8000ffff) {
  2366. if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  2367. DRM_DEBUG_DRIVER("pipe %c underrun\n",
  2368. pipe_name(pipe));
  2369. I915_WRITE(reg, pipe_stats[pipe]);
  2370. irq_received = 1;
  2371. }
  2372. }
  2373. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2374. I915_WRITE16(IIR, iir & ~flip_mask);
  2375. new_iir = I915_READ16(IIR); /* Flush posted writes */
  2376. i915_update_dri1_breadcrumb(dev);
  2377. if (iir & I915_USER_INTERRUPT)
  2378. notify_ring(dev, &dev_priv->ring[RCS]);
  2379. if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
  2380. i8xx_handle_vblank(dev, 0, iir))
  2381. flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
  2382. if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
  2383. i8xx_handle_vblank(dev, 1, iir))
  2384. flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
  2385. iir = new_iir;
  2386. }
  2387. return IRQ_HANDLED;
  2388. }
  2389. static void i8xx_irq_uninstall(struct drm_device * dev)
  2390. {
  2391. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2392. int pipe;
  2393. for_each_pipe(pipe) {
  2394. /* Clear enable bits; then clear status bits */
  2395. I915_WRITE(PIPESTAT(pipe), 0);
  2396. I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  2397. }
  2398. I915_WRITE16(IMR, 0xffff);
  2399. I915_WRITE16(IER, 0x0);
  2400. I915_WRITE16(IIR, I915_READ16(IIR));
  2401. }
  2402. static void i915_irq_preinstall(struct drm_device * dev)
  2403. {
  2404. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2405. int pipe;
  2406. atomic_set(&dev_priv->irq_received, 0);
  2407. if (I915_HAS_HOTPLUG(dev)) {
  2408. I915_WRITE(PORT_HOTPLUG_EN, 0);
  2409. I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2410. }
  2411. I915_WRITE16(HWSTAM, 0xeffe);
  2412. for_each_pipe(pipe)
  2413. I915_WRITE(PIPESTAT(pipe), 0);
  2414. I915_WRITE(IMR, 0xffffffff);
  2415. I915_WRITE(IER, 0x0);
  2416. POSTING_READ(IER);
  2417. }
  2418. static int i915_irq_postinstall(struct drm_device *dev)
  2419. {
  2420. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2421. u32 enable_mask;
  2422. I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  2423. /* Unmask the interrupts that we always want on. */
  2424. dev_priv->irq_mask =
  2425. ~(I915_ASLE_INTERRUPT |
  2426. I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2427. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2428. I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2429. I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  2430. I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2431. enable_mask =
  2432. I915_ASLE_INTERRUPT |
  2433. I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2434. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2435. I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  2436. I915_USER_INTERRUPT;
  2437. if (I915_HAS_HOTPLUG(dev)) {
  2438. I915_WRITE(PORT_HOTPLUG_EN, 0);
  2439. POSTING_READ(PORT_HOTPLUG_EN);
  2440. /* Enable in IER... */
  2441. enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
  2442. /* and unmask in IMR */
  2443. dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
  2444. }
  2445. I915_WRITE(IMR, dev_priv->irq_mask);
  2446. I915_WRITE(IER, enable_mask);
  2447. POSTING_READ(IER);
  2448. i915_enable_asle_pipestat(dev);
  2449. return 0;
  2450. }
  2451. /*
  2452. * Returns true when a page flip has completed.
  2453. */
  2454. static bool i915_handle_vblank(struct drm_device *dev,
  2455. int plane, int pipe, u32 iir)
  2456. {
  2457. drm_i915_private_t *dev_priv = dev->dev_private;
  2458. u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
  2459. if (!drm_handle_vblank(dev, pipe))
  2460. return false;
  2461. if ((iir & flip_pending) == 0)
  2462. return false;
  2463. intel_prepare_page_flip(dev, plane);
  2464. /* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the PendingFlip
  2466. * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  2467. * the flip is completed (no longer pending). Since this doesn't raise
  2468. * an interrupt per se, we watch for the change at vblank.
  2469. */
  2470. if (I915_READ(ISR) & flip_pending)
  2471. return false;
  2472. intel_finish_page_flip(dev, pipe);
  2473. return true;
  2474. }
  2475. static irqreturn_t i915_irq_handler(int irq, void *arg)
  2476. {
  2477. struct drm_device *dev = (struct drm_device *) arg;
  2478. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2479. u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
  2480. unsigned long irqflags;
  2481. u32 flip_mask =
  2482. I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2483. I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  2484. int pipe, ret = IRQ_NONE;
  2485. atomic_inc(&dev_priv->irq_received);
  2486. iir = I915_READ(IIR);
  2487. do {
  2488. bool irq_received = (iir & ~flip_mask) != 0;
  2489. bool blc_event = false;
  2490. /* Can't rely on pipestat interrupt bit in iir as it might
  2491. * have been cleared after the pipestat interrupt was received.
  2492. * It doesn't set the bit in iir again, but it still produces
  2493. * interrupts (for non-MSI).
  2494. */
  2495. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2496. if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  2497. i915_handle_error(dev, false);
  2498. for_each_pipe(pipe) {
  2499. int reg = PIPESTAT(pipe);
  2500. pipe_stats[pipe] = I915_READ(reg);
  2501. /* Clear the PIPE*STAT regs before the IIR */
  2502. if (pipe_stats[pipe] & 0x8000ffff) {
  2503. if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  2504. DRM_DEBUG_DRIVER("pipe %c underrun\n",
  2505. pipe_name(pipe));
  2506. I915_WRITE(reg, pipe_stats[pipe]);
  2507. irq_received = true;
  2508. }
  2509. }
  2510. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2511. if (!irq_received)
  2512. break;
  2513. /* Consume port. Then clear IIR or we'll miss events */
  2514. if ((I915_HAS_HOTPLUG(dev)) &&
  2515. (iir & I915_DISPLAY_PORT_INTERRUPT)) {
  2516. u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
  2517. u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
  2518. DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
  2519. hotplug_status);
  2520. if (hotplug_trigger) {
  2521. if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
  2522. i915_hpd_irq_setup(dev);
  2523. queue_work(dev_priv->wq,
  2524. &dev_priv->hotplug_work);
  2525. }
  2526. I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
  2527. POSTING_READ(PORT_HOTPLUG_STAT);
  2528. }
  2529. I915_WRITE(IIR, iir & ~flip_mask);
  2530. new_iir = I915_READ(IIR); /* Flush posted writes */
  2531. if (iir & I915_USER_INTERRUPT)
  2532. notify_ring(dev, &dev_priv->ring[RCS]);
		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
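	/* Pass the flip-pending bits through IMR so that they are visible
	 * in IIR/ISR, but keep them out of IER: they must not raise an
	 * interrupt themselves and are instead sampled from the vblank
	 * interrupt by i915_handle_vblank().
	 */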
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
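
/*
 * Recompute PORT_HOTPLUG_EN from the per-pin hpd_stats state. Besides the
 * initial setup, this is re-run from the interrupt handlers whenever storm
 * detection flips a pin to HPD_DISABLED, so that a storming pin is actually
 * turned off at the hardware level.
 */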
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later.  So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
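
/*
 * Gen4/G4X interrupt handler. Same overall structure as i915_irq_handler()
 * above (ack IIR minus the flip-pending bits, re-read, loop), but it
 * additionally services the BSD ring user interrupt and GMBUS events, and
 * planes and pipes are wired 1:1 here so no plane swizzling is needed.
 */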
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I965);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
							     IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
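
/*
 * Timer callback that undoes hotplug storm mitigation: any pin parked in
 * HPD_DISABLED is flipped back to HPD_ENABLED, the affected connectors are
 * switched from polling back to HPD detection, and the hotplug enable bits
 * are reprogrammed via the hpd_irq_setup vfunc. The storm-detection path
 * arms this timer when it shuts a pin off.
 */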
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
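
/*
 * One-time setup of the interrupt-related vfuncs: picks the
 * generation-specific pre/post-install, handler, uninstall and vblank
 * hooks, and initializes the work items and timers they rely on. Must run
 * before the IRQ is actually requested.
 */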
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
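
/*
 * (Re)initialize hotplug state: every pin starts out HPD_ENABLED and each
 * connector is switched to interrupt-driven detection when its encoder has
 * a usable HPD pin, otherwise it keeps its native polling mode. Called at
 * driver load and again on resume, once interrupts are back up.
 */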
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}