/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
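
/*
 * These tables map enum hpd_pin values to the hotplug interrupt bits used
 * by each platform/PCH variant; the IRQ handlers below index them by pin.
 */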
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, because there is just one interrupt mask/enable bit
 * for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, because there is just one
 * interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but
	 * LPT has only one pch transcoder A that all pipes can use. To avoid
	 * racy pch transcoder -> pipe lookups from the interrupt code, simply
	 * store the underrun statistics in crtc A. Since we never expose this
	 * anywhere nor use it outside of the fifo underrun code here, using
	 * the "wrong" crtc on LPT won't cause issues.
	 */
	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
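
/*
 * PIPESTAT registers keep the interrupt enable bits in the high word and the
 * corresponding sticky status bits in the low word; status bits are cleared
 * by writing 1. Hence the helpers below read back only the enable half
 * (0x7fff0000) and write mask | (mask >> 16) to enable an interrupt while
 * clearing any stale status.
 */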
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* Polling is disabled when there are no outputs to poll, so make sure
	 * it is re-enabled now that we are disabling HPD on some connectors. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
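
/*
 * Ironlake DRPS: the hardware evaluates GPU busyness against programmed
 * thresholds and raises an interrupt; step the delay value down (higher
 * frequency) when busier than max_avg, and up (lower frequency) when idler
 * than min_avg.
 */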
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_parity_error_irq_handler(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
				 u32 pm_iir)
{
	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */
	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
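
/*
 * HPD interrupt storm detection: count hotplug interrupts per pin inside a
 * HPD_STORM_DETECT_PERIOD ms window; once a pin exceeds HPD_STORM_THRESHOLD
 * it is marked disabled and the hotplug work function switches the connector
 * over to periodic polling (re-enabled later by hotplug_reenable_timer).
 */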
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq,
		   &dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
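
/*
 * Note that DP AUX completion shares gmbus_wait_queue with GMBUS; both
 * handlers simply wake any sleeping waiters.
 */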
static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* We never want to mask useful interrupts (this read also
		 * serves as a posting read). */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
		DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
		i915_handle_error(dev_priv->dev, false);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}
	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear the PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for (i = 0; i < 3; i++) {
		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
			drm_handle_vblank(dev, i);
		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be able
	 * to process them after we restore SDEIER (as soon as we restore it,
	 * we'll get an interrupt if SDEIIR still has something to process due
	 * to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			if (IS_HASWELL(dev))
				hsw_pm_irq_handler(dev_priv, pm_iir);
			else if (pm_iir & GEN6_PM_RPS_EVENTS)
				gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK_ILK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK_ILK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start + acthd + 4) + 1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
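
/*
 * Check whether a ring blocked on a semaphore is waiting on a seqno that
 * its signalling ring has already reached (or will, once kicked). Returns
 * -1 when a deadlock cycle is detected via the per-ring deadlock flags,
 * otherwise whether the awaited seqno has been passed.
 */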
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}
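
/*
 * Classify why a busy ring has stopped advancing: still active (acthd is
 * moving), stuck on a kickable WAIT_FOR_EVENT or semaphore, in a
 * legitimate semaphore wait for another ring, or genuinely hung.
 */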
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return active;

	if (IS_GEN2(dev))
		return hung;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return kick;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return hung;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return kick;
		case 0:
			return wait;
		}
	}

	return hung;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of seqno progress per ring
 * and if there is no progress, the hangcheck score for that ring is
 * increased. Further, acthd is inspected to see if the ring is stuck;
 * if it is, we try to kick the ring. If we see no progress on three
 * subsequent calls, we assume the chip is wedged and try to fix it by
 * resetting the chip.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30
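
	/* Per-ring scores accumulate across hangcheck periods: a busy ring
	 * gains BUSY, a kicked ring KICK and a hung ring HUNG, while a ring
	 * that made progress decays by one. Once a score exceeds FIRE the
	 * GPU is declared hung and a reset is requested below. */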
	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
						  ring->name);
					wake_up_all(&ring->irq_queue);
					ring->hangcheck.score += HUNG;
				} else
					busy = false;
			} else {
				int score;

				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when it
				 * is in a legitimate wait for another ring.
				 * In that case the waiting ring is a victim
				 * and we want to be sure we catch the right
				 * culprit. Then every time we do kick the
				 * ring, add a small increment to the score
				 * so that we can catch a batch that is being
				 * repeatedly kicked and so responsible for
				 * stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case wait:
					score = 0;
					break;
				case active:
					score = BUSY;
					break;
				case kick:
					score = KICK;
					break;
				case hung:
					score = HUNG;
					stuck[i] = true;
					break;
				}
				ring->hangcheck.score += score;
			}
		} else {
			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_ERROR("%s on %s\n",
				  stuck[i] ? "stuck" : "no progress",
				  ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset timer in case the chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
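
/* (Re)arm the hangcheck timer to fire one hangcheck period from now. */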
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (!i915_enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		/* and PM */
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
		I915_WRITE(GEN6_PMIER, 0x0);
		POSTING_READ(GEN6_PMIER);
	}
}
/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	gen5_gt_irq_preinstall(dev);

	ibx_irq_preinstall(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT; GTIIR is cleared twice since the IIR registers can latch a
	 * second pending event while one is already asserted */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_preinstall(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_GPU_CACHE(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= GEN6_PM_RPS_EVENTS;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
		I915_WRITE(GEN6_PMIER, pm_irqs);
		POSTING_READ(GEN6_PMIER);
	}
}
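
/*
 * Display interrupt setup convention: every source we may ever want is
 * enabled in DEIER, while DEIMR gates actual delivery. The vblank bits in
 * extra_mask are therefore enabled in DEIER but stay masked in DEIMR
 * until enable_vblank unmasks them.
 */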
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
				DE_ERR_INT_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB);

		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
	}

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an interrupt */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | extra_mask);
	POSTING_READ(DEIER);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	unsigned long irqflags;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially; enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later. So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
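
/*
 * Timer callback that re-enables hotplug pins previously marked
 * HPD_DISABLED (e.g. after an interrupt storm) and switches the affected
 * connectors back from polling to interrupt-driven detection.
 */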
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
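
/*
 * Wire up the IRQ and vblank driver hooks for the detected platform and
 * initialize the error, RPS, parity and hotplug work items and timers.
 */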
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
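
/*
 * Reset per-pin hotplug state to enabled, set up connector polling
 * fallbacks for connectors without a working HPD pin, and invoke the
 * platform hpd_irq_setup hook to program the hardware.
 */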
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}