/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i965[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
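
/*
 * The PIPESTAT registers pack interrupt-enable bits in the high 16 bits
 * and the corresponding status bits 16 positions below them. That is why
 * the helpers below read back only the 0x7fff0000 enable half and, when
 * enabling, mirror the enable mask into the status half (mask >> 16) to
 * clear any stale pending status in the same write.
 */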
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
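
/*
 * Reports the current scanout position for @pipe in *vpos/*hpos and
 * returns a bitmask of DRM_SCANOUTPOS_* flags: VALID and ACCURATE when
 * the readout worked, plus INVBL when the position falls inside the
 * vblank region. On gen4+ only the vertical position is available (via
 * PIPEDSL); older parts expose a frame-relative pixel counter that is
 * split into vertical and horizontal components below.
 */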
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
	}
	/* If there were no outputs to poll, polling was disabled, so make
	 * sure it is re-enabled now that HPD has been disabled on some
	 * connectors. */
	if (hpd_disabled)
		drm_kms_helper_poll_enable(dev);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
		if (intel_encoder->hot_plug)
			intel_encoder->hot_plug(intel_encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
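
/*
 * Ironlake DRPS: the hardware reports average busyness against upper and
 * lower thresholds. When the GPU is busier than max_avg we step the delay
 * one unit toward max_delay; when it is idler than min_avg we step back
 * toward min_delay, then program the new value via ironlake_set_drps().
 * Note that the delay counts down toward max_delay, so a numerically
 * smaller value corresponds to a higher performance state.
 */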
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
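
/*
 * Gen6+ RPS: the GPU raises an up- or down-threshold interrupt, the IRQ
 * handler stashes the PMIIR bits in dev_priv->rps.pm_iir and queues this
 * work item, which then steps the frequency one unit in the indicated
 * direction under rps.hw_lock. The frequency change cannot be done from
 * the interrupt handler itself because gen6_set_rps() may sleep.
 */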
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since it is
 * statistically likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
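
/*
 * The interrupt handler cannot read the L3 error registers itself (that
 * requires disabling DOP clock gating under struct_mutex), so on a parity
 * error it masks further parity interrupts in GTIMR and defers the real
 * handling to ivybridge_parity_work above, which unmasks the interrupt
 * again once the error details have been read out and reported.
 */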
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. Seeing one here
	 * would mean we have unsafely cleared dev_priv->rps.pm_iir.
	 * Although missing an interrupt of the same type is not a
	 * problem, it would point to a bug in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */
	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
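
/*
 * HPD storm detection: flaky hardware or cabling can fire hotplug
 * interrupts continuously. hotplug_irq_storm_detect() below counts the
 * interrupts seen per HPD pin within a HPD_STORM_DETECT_PERIOD ms window;
 * once a pin exceeds HPD_STORM_THRESHOLD it is marked HPD_MARK_DISABLED
 * and the function returns true, so the caller can reprogram the hotplug
 * registers (the hotplug work function then switches the affected
 * connector over to polling).
 */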
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool ret = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			ret = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return ret;
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
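
/*
 * Valleyview uses a loop-until-idle pattern: keep re-reading VLV_IIR,
 * GTIIR and GEN6_PMIIR and handling whatever is set until all three read
 * back zero. The PIPE*STAT bits are cleared before the corresponding IIR
 * write, and the port hotplug status is consumed before IIR so no port
 * event is lost.
 */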
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
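
/*
 * Ivybridge display interrupt handler. The master interrupt enable in
 * DEIER is dropped while the IIR registers are being cleared so that a
 * new interrupt cannot race with the clearing, and the south (PCH)
 * interrupt enable in SDEIER is likewise disabled: SDEIIR is written only
 * once per invocation, so any PCH interrupt arriving meanwhile is held
 * back and re-fires once SDEIER is restored at the end of the handler.
 */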
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
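
/*
 * INSTDONE reporting moved around over the generations: gen2/3 have a
 * single INSTDONE register, gen4-6 add INSTDONE1, and gen7 spreads the
 * information over four registers. I915_NUM_INSTDONE_REG is sized for the
 * largest case, hence the memset below so that unread slots stay zero.
 */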
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

#ifdef CONFIG_DEBUG_FS
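/*
 * Copy the contents of a GEM object into an anonymous error object so the
 * snapshot cannot be overwritten by userspace afterwards. Three paths are
 * tried, in order: reading through the GTT aperture when the object has a
 * global GTT mapping within the mappable range, reading directly from
 * stolen memory, and finally kmapping the backing pages (with clflushes
 * around the copy, since the CPU view of those pages may otherwise be
 * stale). Allocations use GFP_ATOMIC since error capture may run in
 * interrupt context.
 */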
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size >> PAGE_SHIFT)

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
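
/*
 * capture_bo() snapshots the bookkeeping for one GEM object. Note the
 * encoding of err->pinned: 0 means not pinned, 1 pinned by the kernel,
 * and -1 pinned on behalf of userspace (the user_pin_count check takes
 * precedence when both counts are non-zero).
 */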
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
		/* fall through: gen3 also has the 8 gen2-style fence registers */
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	default:
		BUG();
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
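
/*
 * Snapshot the per-ring hardware state. Registers that only exist on
 * newer parts (semaphore mailboxes on gen6+, the per-ring fault and
 * IPEIR/IPEHR/INSTDONE blocks on gen4+) are read conditionally below;
 * everything else (head, tail, ctl, active head, last seqno and the
 * software ring positions) is captured unconditionally.
 */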
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
  1152. static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
  1153. struct drm_i915_error_state *error,
  1154. struct drm_i915_error_ring *ering)
  1155. {
  1156. struct drm_i915_private *dev_priv = ring->dev->dev_private;
  1157. struct drm_i915_gem_object *obj;
  1158. /* Currently render ring is the only HW context user */
  1159. if (ring->id != RCS || !error->ccid)
  1160. return;
  1161. list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
  1162. if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
  1163. ering->ctx = i915_error_object_create_sized(dev_priv,
  1164. obj, 1);
  1165. }
  1166. }
  1167. }
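
/*
 * For each ring, capture the ring registers, the batch that appears to
 * have hung, the ringbuffer contents and the list of outstanding
 * requests. Allocations use GFP_ATOMIC since this can be called from
 * interrupt context.
 */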
static void i915_gem_record_rings(struct drm_device *dev,
                                  struct drm_i915_error_state *error)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        struct drm_i915_gem_request *request;
        int i, count;

        for_each_ring(ring, dev_priv, i) {
                i915_record_ring_state(dev, error, ring);

                error->ring[i].batchbuffer =
                        i915_error_first_batchbuffer(dev_priv, ring);

                error->ring[i].ringbuffer =
                        i915_error_object_create(dev_priv, ring->obj);

                i915_gem_record_active_context(ring, error, &error->ring[i]);

                count = 0;
                list_for_each_entry(request, &ring->request_list, list)
                        count++;

                error->ring[i].num_requests = count;
                error->ring[i].requests =
                        kmalloc(count * sizeof(struct drm_i915_error_request),
                                GFP_ATOMIC);
                if (error->ring[i].requests == NULL) {
                        error->ring[i].num_requests = 0;
                        continue;
                }

                count = 0;
                list_for_each_entry(request, &ring->request_list, list) {
                        struct drm_i915_error_request *erq;

                        erq = &error->ring[i].requests[count++];
                        erq->seqno = request->seqno;
                        erq->jiffies = request->emitted_jiffies;
                        erq->tail = request->tail;
                }
        }
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, pipe;

        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
        error = dev_priv->gpu_error.first_error;
        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
        if (error)
                return;

        /* Account for pipe specific data like PIPE*STAT */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
                return;
        }

        DRM_INFO("capturing error event; look for more information in "
                 "/sys/kernel/debug/dri/%d/i915_error_state\n",
                 dev->primary->index);

        kref_init(&error->ref);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        if (HAS_HW_CONTEXTS(dev))
                error->ccid = I915_READ(CCID);

        if (HAS_PCH_SPLIT(dev))
                error->ier = I915_READ(DEIER) | I915_READ(GTIER);
        else if (IS_VALLEYVIEW(dev))
                error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
        else if (IS_GEN2(dev))
                error->ier = I915_READ16(IER);
        else
                error->ier = I915_READ(IER);

        if (INTEL_INFO(dev)->gen >= 6)
                error->derrmr = I915_READ(DERRMR);

        if (IS_VALLEYVIEW(dev))
                error->forcewake = I915_READ(FORCEWAKE_VLV);
        else if (INTEL_INFO(dev)->gen >= 7)
                error->forcewake = I915_READ(FORCEWAKE_MT);
        else if (INTEL_INFO(dev)->gen == 6)
                error->forcewake = I915_READ(FORCEWAKE);

        if (!HAS_PCH_SPLIT(dev))
                for_each_pipe(pipe)
                        error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

        if (INTEL_INFO(dev)->gen >= 6) {
                error->error = I915_READ(ERROR_GEN6);
                error->done_reg = I915_READ(DONE_REG);
        }

        if (INTEL_INFO(dev)->gen == 7)
                error->err_int = I915_READ(GEN7_ERR_INT);

        i915_get_extra_instdone(dev, error->extra_instdone);

        i915_gem_record_fences(dev, error);
        i915_gem_record_rings(dev, error);

        /* Record buffers on the active and pinned lists. */
        error->active_bo = NULL;
        error->pinned_bo = NULL;

        i = 0;
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
                i++;
        error->active_bo_count = i;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
                if (obj->pin_count)
                        i++;
        error->pinned_bo_count = i - error->active_bo_count;

        error->active_bo = NULL;
        error->pinned_bo = NULL;
        if (i) {
                error->active_bo = kmalloc(sizeof(*error->active_bo) * i,
                                           GFP_ATOMIC);
                if (error->active_bo)
                        error->pinned_bo =
                                error->active_bo + error->active_bo_count;
        }

        if (error->active_bo)
                error->active_bo_count =
                        capture_active_bo(error->active_bo,
                                          error->active_bo_count,
                                          &dev_priv->mm.active_list);

        if (error->pinned_bo)
                error->pinned_bo_count =
                        capture_pinned_bo(error->pinned_bo,
                                          error->pinned_bo_count,
                                          &dev_priv->mm.bound_list);

        do_gettimeofday(&error->time);

        error->overlay = intel_overlay_capture_error_state(dev);
        error->display = intel_display_capture_error_state(dev);

        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
        if (dev_priv->gpu_error.first_error == NULL) {
                dev_priv->gpu_error.first_error = error;
                error = NULL;
        }
        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

        if (error)
                i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
        error = dev_priv->gpu_error.first_error;
        dev_priv->gpu_error.first_error = NULL;
        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

        if (error)
                kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t instdone[I915_NUM_INSTDONE_REG];
        u32 eir = I915_READ(EIR);
        int pipe, i;

        if (!eir)
                return;

        pr_err("render error detected, EIR: 0x%08x\n", eir);

        i915_get_extra_instdone(dev, instdone);

        if (IS_G4X(dev)) {
                if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        for (i = 0; i < ARRAY_SIZE(instdone); i++)
                                pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
                if (eir & GM45_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (!IS_GEN2(dev)) {
                if (eir & I915_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (eir & I915_ERROR_MEMORY_REFRESH) {
                pr_err("memory refresh error:\n");
                for_each_pipe(pipe)
                        pr_err("pipe %c stat: 0x%08x\n",
                               pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
                /* pipestat has already been acked */
        }
        if (eir & I915_ERROR_INSTRUCTION) {
                pr_err("instruction error\n");
                pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
                for (i = 0; i < ARRAY_SIZE(instdone); i++)
                        pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
                if (INTEL_INFO(dev)->gen < 4) {
                        u32 ipeir = I915_READ(IPEIR);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
                        I915_WRITE(IPEIR, ipeir);
                        POSTING_READ(IPEIR);
                } else {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
        }

        I915_WRITE(EIR, eir);
        POSTING_READ(EIR);
        eir = I915_READ(EIR);
        if (eir) {
                /*
                 * Some errors might have become stuck,
                 * so mask them.
                 */
                DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
                I915_WRITE(EMR, I915_READ(EMR) | eir);
                I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        }
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int i;

        i915_capture_error_state(dev);
        i915_report_and_clear_eir(dev);

        if (wedged) {
                atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
                                &dev_priv->gpu_error.reset_counter);

                /*
                 * Wakeup waiting processes so that the reset work item
                 * doesn't deadlock trying to grab various locks.
                 */
                for_each_ring(ring, dev_priv, i)
                        wake_up_all(&ring->irq_queue);
        }

        queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
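
/*
 * Check whether a queued page flip appears to have landed without its
 * flip-done interrupt ever arriving: if the display base register already
 * points at the new framebuffer, assume the interrupt was missed and
 * complete the flip by hand.
 */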
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_gem_object *obj;
        struct intel_unpin_work *work;
        unsigned long flags;
        bool stall_detected;

        /* Ignore early vblank irqs */
        if (intel_crtc == NULL)
                return;

        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;

        if (work == NULL ||
            atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
            !work->enable_stall_check) {
                /* Either the pending flip IRQ arrived, or we're too early. Don't check */
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }

        /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
        obj = work->pending_flip_obj;
        if (INTEL_INFO(dev)->gen >= 4) {
                int dspsurf = DSPSURF(intel_crtc->plane);
                stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
                                        obj->gtt_offset;
        } else {
                int dspaddr = DSPADDR(intel_crtc->plane);
                stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
                                        crtc->y * crtc->fb->pitches[0] +
                                        crtc->x * crtc->fb->bits_per_pixel/8);
        }

        spin_unlock_irqrestore(&dev->event_lock, flags);

        if (stall_detected) {
                DRM_DEBUG_DRIVER("Pageflip stall detected\n");
                intel_prepare_page_flip(dev, intel_crtc->plane);
        }
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_ENABLE);

        /* maintain vblank delivery even in deep C-states */
        if (dev_priv->info->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
                                    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_enable_display_irq(dev_priv,
                                    DE_PIPEA_VBLANK_IVB << (5 * pipe));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        u32 imr;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        imr = I915_READ(VLV_IMR);
        if (pipe == 0)
                imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        else
                imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
        I915_WRITE(VLV_IMR, imr);
        i915_enable_pipestat(dev_priv, pipe,
                             PIPE_START_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (dev_priv->info->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_VBLANK_INTERRUPT_ENABLE |
                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_disable_display_irq(dev_priv,
                                     DE_PIPEA_VBLANK_IVB << (pipe * 5));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        u32 imr;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
        imr = I915_READ(VLV_IMR);
        if (pipe == 0)
                imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        else
                imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
        I915_WRITE(VLV_IMR, imr);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
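
/*
 * Hangcheck helpers: a ring counts as idle when its last emitted request
 * has been retired (its seqno has passed). The hangcheck timer below
 * re-arms itself every DRM_I915_HANGCHECK_JIFFIES (on the order of a
 * second or so) and only escalates if ACTHD/INSTDONE stop moving.
 */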
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
        return list_entry(ring->request_list.prev,
                          struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
        if (list_empty(&ring->request_list) ||
            i915_seqno_passed(ring->get_seqno(ring, false),
                              ring_last_seqno(ring))) {
                /* Issue a wake-up to catch stuck h/w. */
                if (waitqueue_active(&ring->irq_queue)) {
                        DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
                                  ring->name);
                        wake_up_all(&ring->irq_queue);
                        *err = true;
                }
                return true;
        }
        return false;
}

static bool semaphore_passed(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
        struct intel_ring_buffer *signaller;
        u32 cmd, ipehr, acthd_min;

        ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
        if ((ipehr & ~(0x3 << 16)) !=
            (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
                return false;

        /* ACTHD is likely pointing to the dword after the actual command,
         * so scan backwards until we find the MBOX.
         */
        acthd_min = max((int)acthd - 3 * 4, 0);
        do {
                cmd = ioread32(ring->virtual_start + acthd);
                if (cmd == ipehr)
                        break;

                acthd -= 4;
                if (acthd < acthd_min)
                        return false;
        } while (1);

        signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
        return i915_seqno_passed(signaller->get_seqno(signaller, false),
                                 ioread32(ring->virtual_start + acthd + 4) + 1);
}
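
/*
 * Try to unwedge a stuck ring: if it is blocked on WAIT_FOR_EVENT, or on
 * a semaphore whose signal has in fact already passed, rewriting the ring
 * control register kicks it back into action.
 */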
static bool kick_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp = I915_READ_CTL(ring);

        if (tmp & RING_WAIT) {
                DRM_ERROR("Kicking stuck wait on %s\n",
                          ring->name);
                I915_WRITE_CTL(ring, tmp);
                return true;
        }
        if (INTEL_INFO(dev)->gen >= 6 &&
            tmp & RING_WAIT_SEMAPHORE &&
            semaphore_passed(ring)) {
                DRM_ERROR("Kicking stuck semaphore on %s\n",
                          ring->name);
                I915_WRITE_CTL(ring, tmp);
                return true;
        }
        return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (dev_priv->gpu_error.hangcheck_count++ > 1) {
                bool hung = true;

                DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
                i915_handle_error(dev, true);

                if (!IS_GEN2(dev)) {
                        struct intel_ring_buffer *ring;
                        int i;

                        /* Is the chip hanging on a WAIT_FOR_EVENT?
                         * If so we can simply poke the RB_WAIT bit
                         * and break the hang. This should work on
                         * all but the second generation chipsets.
                         */
                        for_each_ring(ring, dev_priv, i)
                                hung &= !kick_ring(ring);
                }

                return hung;
        }

        return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
        struct intel_ring_buffer *ring;
        bool err = false, idle;
        int i;

        if (!i915_enable_hangcheck)
                return;

        memset(acthd, 0, sizeof(acthd));
        idle = true;
        for_each_ring(ring, dev_priv, i) {
                idle &= i915_hangcheck_ring_idle(ring, &err);
                acthd[i] = intel_ring_get_active_head(ring);
        }

        /* If all work is done then ACTHD clearly hasn't advanced. */
        if (idle) {
                if (err) {
                        if (i915_hangcheck_hung(dev))
                                return;

                        goto repeat;
                }

                dev_priv->gpu_error.hangcheck_count = 0;
                return;
        }

        i915_get_extra_instdone(dev, instdone);
        if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
                   sizeof(acthd)) == 0 &&
            memcmp(dev_priv->gpu_error.prev_instdone, instdone,
                   sizeof(instdone)) == 0) {
                if (i915_hangcheck_hung(dev))
                        return;
        } else {
                dev_priv->gpu_error.hangcheck_count = 0;

                memcpy(dev_priv->gpu_error.last_acthd, acthd,
                       sizeof(acthd));
                memcpy(dev_priv->gpu_error.prev_instdone, instdone,
                       sizeof(instdone));
        }

repeat:
        /* Reset timer in case chip hangs without another request being added */
        mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

/* drm_dma.h hooks
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        atomic_set(&dev_priv->irq_received, 0);

        I915_WRITE(HWSTAM, 0xeffe);

        /* XXX hotplug from PCH */

        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        POSTING_READ(DEIER);

        /* and GT */
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);

        if (HAS_PCH_NOP(dev))
                return;

        /* south display irq */
        I915_WRITE(SDEIMR, 0xffffffff);
        /*
         * SDEIER is also touched by the interrupt handler to work around missed
         * PCH interrupts. Hence we can't update it after the interrupt handler
         * is enabled - instead we unconditionally enable all PCH interrupt
         * sources here, but then only unmask them as needed with SDEIMR.
         */
        I915_WRITE(SDEIER, 0xffffffff);
        POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
        I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
        I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
        I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

        /* and GT */
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);

        I915_WRITE(DPINVGTT, 0xff);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        POSTING_READ(VLV_IER);
}
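
/*
 * Build the PCH hotplug interrupt mask from the encoders whose hotplug
 * pins are currently marked HPD_ENABLED; pins that have been disabled
 * (e.g. after an interrupt storm) stay masked in SDEIMR.
 */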
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *intel_encoder;
        u32 mask = ~I915_READ(SDEIMR);
        u32 hotplug;

        if (HAS_PCH_IBX(dev)) {
                mask &= ~SDE_HOTPLUG_MASK;
                list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
                                mask |= hpd_ibx[intel_encoder->hpd_pin];
        } else {
                mask &= ~SDE_HOTPLUG_MASK_CPT;
                list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
                                mask |= hpd_cpt[intel_encoder->hpd_pin];
        }

        I915_WRITE(SDEIMR, ~mask);

        /*
         * Enable digital hotplug on the PCH, and configure the DP short pulse
         * duration to 2ms (which is the minimum in the Display Port spec)
         *
         * This register is the same on all known PCH chips.
         */
        hotplug = I915_READ(PCH_PORT_HOTPLUG);
        hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 mask;

        if (HAS_PCH_IBX(dev))
                mask = SDE_GMBUS | SDE_AUX_MASK;
        else
                mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

        if (HAS_PCH_NOP(dev))
                return;

        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
        I915_WRITE(SDEIMR, ~mask);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable the kinds of interrupts we always want enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
                           DE_AUX_CHANNEL_A;
        u32 render_irqs;

        dev_priv->irq_mask = ~display_mask;

        /* these should always be able to generate an irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
        POSTING_READ(DEIER);

        dev_priv->gt_irq_mask = ~0;

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

        if (IS_GEN6(dev))
                render_irqs =
                        GT_USER_INTERRUPT |
                        GEN6_BSD_USER_INTERRUPT |
                        GEN6_BLITTER_USER_INTERRUPT;
        else
                render_irqs =
                        GT_USER_INTERRUPT |
                        GT_PIPE_NOTIFY |
                        GT_BSD_USER_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        ibx_irq_postinstall(dev);

        if (IS_IRONLAKE_M(dev)) {
                /* Clear & enable PCU event interrupts */
                I915_WRITE(DEIIR, DE_PCU_EVENT);
                I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
        }

        return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable the kinds of interrupts we always want enabled */
        u32 display_mask =
                DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
                DE_PLANEC_FLIP_DONE_IVB |
                DE_PLANEB_FLIP_DONE_IVB |
                DE_PLANEA_FLIP_DONE_IVB |
                DE_AUX_CHANNEL_A_IVB;
        u32 render_irqs;

        dev_priv->irq_mask = ~display_mask;

        /* these should always be able to generate an irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        I915_WRITE(DEIER,
                   display_mask |
                   DE_PIPEC_VBLANK_IVB |
                   DE_PIPEB_VBLANK_IVB |
                   DE_PIPEA_VBLANK_IVB);
        POSTING_READ(DEIER);

        dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

        render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
                GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        ibx_irq_postinstall(dev);

        return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
        u32 render_irqs;
        u16 msid;

        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
        enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

        /*
         * Leave vblank interrupts masked initially. enable/disable will
         * toggle them based on usage.
         */
        dev_priv->irq_mask = (~enable_mask) |
                I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

        /* Hack for broken MSIs on VLV */
        pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
        pci_read_config_word(dev->pdev, 0x98, &msid);
        msid &= 0xff; /* mask out delivery bits */
        msid |= (1<<14);
        pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);

        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IER, enable_mask);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(PIPESTAT(0), 0xffff);
        I915_WRITE(PIPESTAT(1), 0xffff);
        POSTING_READ(VLV_IER);

        i915_enable_pipestat(dev_priv, 0, pipestat_enable);
        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
        i915_enable_pipestat(dev_priv, 1, pipestat_enable);

        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

        render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
                GEN6_BLITTER_USER_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        /* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
        I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
        I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

        return 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (!dev_priv)
                return;

        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);

        I915_WRITE(HWSTAM, 0xffffffff);
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!dev_priv)
                return;

        I915_WRITE(HWSTAM, 0xffffffff);

        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        I915_WRITE(DEIIR, I915_READ(DEIIR));

        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        I915_WRITE(GTIIR, I915_READ(GTIIR));

        if (HAS_PCH_NOP(dev))
                return;

        I915_WRITE(SDEIMR, 0xffffffff);
        I915_WRITE(SDEIER, 0x0);
        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE16(IMR, 0xffff);
        I915_WRITE16(IER, 0x0);
        POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
                  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        I915_WRITE16(IMR, dev_priv->irq_mask);

        I915_WRITE16(IER,
                     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
                     I915_USER_INTERRUPT);
        POSTING_READ16(IER);

        return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
                               int pipe, u16 iir)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

        if (!drm_handle_vblank(dev, pipe))
                return false;

        if ((iir & flip_pending) == 0)
                return false;

        intel_prepare_page_flip(dev, pipe);

        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
         * the flip is completed (no longer pending). Since this doesn't raise
         * an interrupt per se, we watch for the change at vblank.
         */
        if (I915_READ16(ISR) & flip_pending)
                return false;

        intel_finish_page_flip(dev, pipe);

        return true;
}
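
/*
 * Gen2 interrupt handler. Note the 16-bit register accessors: on these
 * chipsets IIR/IMR/IER are only 16 bits wide. Flip-pending bits are
 * filtered out of the loop condition since they do not clear until the
 * flip completes.
 */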
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 iir, new_iir;
        u32 pipe_stats[2];
        unsigned long irqflags;
        int irq_received;
        int pipe;
        u16 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ16(IIR);
        if (iir == 0)
                return IRQ_NONE;

        while (iir & ~flip_mask) {
                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false);

                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = 1;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                I915_WRITE16(IIR, iir & ~flip_mask);
                new_iir = I915_READ16(IIR); /* Flush posted writes */

                i915_update_dri1_breadcrumb(dev);

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);

                if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
                    i8xx_handle_vblank(dev, 0, iir))
                        flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

                if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
                    i8xx_handle_vblank(dev, 1, iir))
                        flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

                iir = new_iir;
        }

        return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        for_each_pipe(pipe) {
                /* Clear enable bits; then clear status bits */
                I915_WRITE(PIPESTAT(pipe), 0);
                I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
        }
        I915_WRITE16(IMR, 0xffff);
        I915_WRITE16(IER, 0x0);
        I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        I915_WRITE16(HWSTAM, 0xeffe);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
        POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;

        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
                  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                POSTING_READ(PORT_HOTPLUG_EN);

                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
        }

        I915_WRITE(IMR, dev_priv->irq_mask);
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);

        intel_opregion_enable_asle(dev);

        return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
                               int plane, int pipe, u32 iir)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

        if (!drm_handle_vblank(dev, pipe))
                return false;

        if ((iir & flip_pending) == 0)
                return false;

        intel_prepare_page_flip(dev, plane);

        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
         * the flip is completed (no longer pending). Since this doesn't raise
         * an interrupt per se, we watch for the change at vblank.
         */
        if (I915_READ(ISR) & flip_pending)
                return false;

        intel_finish_page_flip(dev, pipe);

        return true;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
        int pipe, ret = IRQ_NONE;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ(IIR);
        do {
                bool irq_received = (iir & ~flip_mask) != 0;
                bool blc_event = false;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false);

                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /* Clear the PIPE*STAT regs before the IIR */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = true;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                if (!irq_received)
                        break;

                /* Consume port. Then clear IIR or we'll miss events */
                if ((I915_HAS_HOTPLUG(dev)) &&
                    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_trigger) {
                                if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
                        }
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        POSTING_READ(PORT_HOTPLUG_STAT);
                }

                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);

                for_each_pipe(pipe) {
                        int plane = pipe;
                        if (IS_MOBILE(dev))
                                plane = !plane;

                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
                            i915_handle_vblank(dev, plane, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
                }

                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero. If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read. This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                ret = IRQ_HANDLED;
                iir = new_iir;
        } while (iir & ~flip_mask);

        i915_update_dri1_breadcrumb(dev);

        return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        I915_WRITE16(HWSTAM, 0xffff);
        for_each_pipe(pipe) {
                /* Clear enable bits; then clear status bits */
                I915_WRITE(PIPESTAT(pipe), 0);
                I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
        }
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        I915_WRITE(HWSTAM, 0xeffe);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
        POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
        u32 error_mask;

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
                               I915_DISPLAY_PORT_INTERRUPT |
                               I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
                               I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

        enable_mask = ~dev_priv->irq_mask;
        enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                         I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
        enable_mask |= I915_USER_INTERRUPT;

        if (IS_G4X(dev))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         */
        if (IS_G4X(dev)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
                               I915_ERROR_MEMORY_REFRESH);
        } else {
                error_mask = ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH);
        }
        I915_WRITE(EMR, error_mask);

        I915_WRITE(IMR, dev_priv->irq_mask);
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);

        intel_opregion_enable_asle(dev);

        return 0;
}
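
/*
 * Program PORT_HOTPLUG_EN from the per-pin HPD state, mirroring what
 * ibx_hpd_irq_setup() does for PCH platforms. Only pins still marked
 * HPD_ENABLED get their detect logic turned on.
 */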
static void i915_hpd_irq_setup(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *intel_encoder;
        u32 hotplug_en;

        if (I915_HAS_HOTPLUG(dev)) {
                hotplug_en = I915_READ(PORT_HOTPLUG_EN);
                hotplug_en &= ~HOTPLUG_INT_EN_MASK;
                /* Note HDMI and DP share hotplug bits */
                /* enable bits are the same for all generations */
                list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
                                hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
                /* Programming the CRT detection parameters tends
                   to generate a spurious hotplug event about three
                   seconds later. So just do it once.
                 */
                if (IS_G4X(dev))
                        hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
                hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

                /* Ignore TV since it's buggy */
                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
        }
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ(IIR);

        for (;;) {
                bool blc_event = false;

                irq_received = (iir & ~flip_mask) != 0;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false);

                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = 1;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                if (!irq_received)
                        break;

                ret = IRQ_HANDLED;

                /* Consume port. Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
                                                                HOTPLUG_INT_STATUS_G4X :
                                                                HOTPLUG_INT_STATUS_I965);

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_trigger) {
                                if (hotplug_irq_storm_detect(dev, hotplug_trigger,
                                                             IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
                        }
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);
                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[VCS]);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
                            i915_handle_vblank(dev, pipe, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
                }

                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero. If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read. This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                iir = new_iir;
        }

        i915_update_dri1_breadcrumb(dev);

        return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (!dev_priv)
                return;

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        I915_WRITE(HWSTAM, 0xffffffff);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe),
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
}
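
/*
 * Select the per-platform IRQ vtable. Roughly, the driver load path is
 * expected to run (sketch -- the exact call sites live elsewhere in the
 * driver):
 *
 *      intel_irq_init(dev);
 *      drm_irq_install(dev);   (invokes the preinstall/postinstall hooks)
 *      intel_hpd_init(dev);
 */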
void intel_irq_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);

        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
        else
                dev->driver->get_vblank_timestamp = NULL;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

        if (IS_VALLEYVIEW(dev)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_preinstall;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
                dev->driver->irq_uninstall = valleyview_irq_uninstall;
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                /* Share pre & uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ivybridge_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;
                dev->driver->disable_vblank = ivybridge_disable_vblank;
                dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else {
                if (INTEL_INFO(dev)->gen == 2) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
                } else if (INTEL_INFO(dev)->gen == 3) {
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
                        dev->driver->irq_handler = i915_irq_handler;
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
                } else {
                        dev->driver->irq_preinstall = i965_irq_preinstall;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_uninstall;
                        dev->driver->irq_handler = i965_irq_handler;
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
                }
                dev->driver->enable_vblank = i915_enable_vblank;
                dev->driver->disable_vblank = i915_disable_vblank;
        }
}
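
/*
 * Reset the per-pin HPD statistics to HPD_ENABLED, mark hotplug-capable
 * connectors for HPD-based polling, and reprogram the hotplug hardware
 * via the platform hook (typically run at init and again on resume).
 */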
void intel_hpd_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
        int i;

        for (i = 1; i < HPD_NUM_PINS; i++) {
                dev_priv->hpd_stats[i].hpd_cnt = 0;
                dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
        }
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                struct intel_connector *intel_connector = to_intel_connector(connector);
                connector->polled = intel_connector->polled;
                if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
                        connector->polled = DRM_CONNECTOR_POLL_HPD;
        }

        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
}