i915_irq.c

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
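
/*
 * Per-platform/per-PCH tables mapping each hotplug pin (enum hpd_pin) to the
 * hardware enable or status bit used for it in the hotplug registers.
 */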
static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
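/*
 * DEIMR is a mask register: a bit set to 1 blocks delivery of the
 * corresponding interrupt, so "enable" below clears bits and "disable"
 * sets them.
 */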
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        enum pipe pipe;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->cpu_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->pch_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;

        if (enable)
                ironlake_enable_display_irq(dev_priv, bit);
        else
                ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                if (!ivb_can_enable_err_int(dev))
                        return;

                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
                                         ERR_INT_FIFO_UNDERRUN_B |
                                         ERR_INT_FIFO_UNDERRUN_C);

                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
        }
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
                                            bool enable)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
                                                SDE_TRANSB_FIFO_UNDER;

        if (enable)
                I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
        else
                I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

        POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                if (!cpt_can_enable_serr_int(dev))
                        return;

                I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
                                     SERR_INT_TRANS_B_FIFO_UNDERRUN |
                                     SERR_INT_TRANS_C_FIFO_UNDERRUN);

                I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
        } else {
                I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
        }

        POSTING_READ(SDEIMR);
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function enables or disables CPU FIFO underrun reporting for a
 * specific pipe. Note that on some gens (e.g. IVB, HSW), disabling FIFO
 * underrun reporting for one pipe may also disable all the other CPU error
 * interrupts for the other pipes, because there is just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                           enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->cpu_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->cpu_fifo_underrun_disabled = !enable;

        if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function enables or disables PCH FIFO underrun reporting for a
 * specific PCH transcoder. Note that on some PCHs (e.g. CPT/PPT), disabling
 * FIFO underrun reporting for one transcoder may also disable all the other
 * PCH error interrupts for the other transcoders, because there is just one
 * interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           enum transcoder pch_transcoder,
                                           bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe p;
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        unsigned long flags;
        bool ret;

        if (HAS_PCH_LPT(dev)) {
                crtc = NULL;
                for_each_pipe(p) {
                        struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
                        if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
                                crtc = c;
                                break;
                        }
                }
                if (!crtc) {
                        DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
                        return false;
                }
        } else {
                crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
        }
        intel_crtc = to_intel_crtc(crtc);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->pch_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->pch_fifo_underrun_disabled = !enable;

        if (HAS_PCH_IBX(dev))
                ibx_set_fifo_underrun_reporting(intel_crtc, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;
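
        /*
         * PIPESTAT keeps interrupt enable bits in the high word and the
         * matching status bits in the low word: masking with 0x7fff0000
         * preserves the current enables, and "mask >> 16" below addresses
         * the status bit paired with an enable bit.
         */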
        if ((pipestat & mask) == mask)
                return;

        /* Enable the interrupt, clear any pending status */
        pipestat |= mask | (mask >> 16);
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        if ((pipestat & mask) == 0)
                return;

        pipestat &= ~mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        low >>= PIPE_FRAME_LOW_SHIFT;
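
        /* The low field is only 8 bits wide, so splice it in under high1. */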
        return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                                    int *vpos, int *hpos)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* Get vtotal. */
        vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = I915_READ(PIPEDSL(pipe));

                /* Decode into vertical scanout position. Don't have
                 * horizontal scanout position.
                 */
                *vpos = position & 0x1fff;
                *hpos = 0;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* Query vblank area. */
        vbl = I915_READ(VBLANK(cpu_transcoder));

        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
        vbl_end = (vbl >> 16) & 0x1fff;

        if ((*vpos < vbl_start) || (*vpos > vbl_end))
                in_vbl = false;

        /* Inside "upper part" of vblank area? Apply corrective offset: */
        if (in_vbl && (*vpos >= vbl_start))
                *vpos = *vpos - vtotal;

        /* Readouts valid? */
        if (vbl > 0)
                ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                     int *max_error,
                                     struct timeval *vblank_time,
                                     unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
                      connector->base.id,
                      drm_get_connector_name(connector),
                      old_status, connector->status);
        return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        /* HPD irq before everything is fully set up. */
        if (!dev_priv->enable_hotplug_processing)
                return;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                 drm_get_connector_name(connector));
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      drm_get_connector_name(connector),
                                      intel_encoder->hpd_pin);
                }
        }
        /* If there were no outputs to poll, poll was disabled,
         * therefore make sure it's enabled when disabling HPD on
         * some connectors.
         */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_timer(&dev_priv->hotplug_reenable_timer,
                          jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}
static void ironlake_handle_rps_change(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;
        unsigned long flags;

        spin_lock_irqsave(&mchdev_lock, flags);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock_irqrestore(&mchdev_lock, flags);
}

static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (ring->obj == NULL)
                return;

        trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

        wake_up_all(&ring->irq_queue);
        if (i915_enable_hangcheck) {
                mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                          round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
}
static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir, pm_imr;
        u8 new_delay;

        spin_lock_irq(&dev_priv->rps.lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
        /* Make sure not to corrupt PMIMR state used by ringbuffer code */
        I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->rps.lock);

        if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                new_delay = dev_priv->rps.cur_delay + 1;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (IS_VALLEYVIEW(dev_priv->dev) &&
                    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
        } else
                new_delay = dev_priv->rps.cur_delay - 1;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        if (new_delay >= dev_priv->rps.min_delay &&
            new_delay <= dev_priv->rps.max_delay) {
                if (IS_VALLEYVIEW(dev_priv->dev))
                        valleyview_set_rps(dev_priv->dev, new_delay);
                else
                        gen6_set_rps(dev_priv->dev, new_delay);
        }

        if (IS_VALLEYVIEW(dev_priv->dev)) {
                /*
                 * On VLV, when we enter RC6 we may not be at the minimum
                 * voltage level, so arm a timer to check. It should only
                 * fire when there's activity or once after we've entered
                 * RC6, and then won't be re-armed until the next RPS interrupt.
                 */
                mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
                                 msecs_to_jiffies(100));
        }

        mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[5];
        uint32_t misccpctl;
        unsigned long flags;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        error_status = I915_READ(GEN7_L3CDERRST1);
        row = GEN7_PARITY_ERROR_ROW(error_status);
        bank = GEN7_PARITY_ERROR_BANK(error_status);
        subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

        I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
                                    GEN7_L3CDERRST1_ENABLE);
        POSTING_READ(GEN7_L3CDERRST1);

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);

        parity_event[0] = "L3_PARITY_ERROR=1";
        parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
        parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
        parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
        parity_event[4] = NULL;

        kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
                           KOBJ_CHANGE, parity_event);

        DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
                  row, bank, subbank);

        kfree(parity_event[3]);
        kfree(parity_event[2]);
        kfree(parity_event[1]);
}
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long flags;

        if (!HAS_L3_GPU_CACHE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }

        if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                ivybridge_handle_parity_error(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                                u32 pm_iir)
{
        unsigned long flags;

        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. The warning
         * covers the case where we have unsafely cleared
         * dev_priv->rps.pm_iir. Although missing an interrupt of the same
         * type is not a problem, it indicates a problem in the logic.
         *
         * The mask bit in IMR is cleared by dev_priv->rps.work.
         */
        spin_lock_irqsave(&dev_priv->rps.lock, flags);
        dev_priv->rps.pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
        POSTING_READ(GEN6_PMIMR);
        spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

        queue_work(dev_priv->wq, &dev_priv->rps.work);
}
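
/*
 * HPD "storm" detection: if more than HPD_STORM_THRESHOLD hotplug interrupts
 * arrive on one pin within HPD_STORM_DETECT_PERIOD ms, the pin is marked as
 * storming and masked via hpd_irq_setup; the hotplug work then falls back to
 * polling for the affected connectors.
 */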
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                         u32 hotplug_trigger,
                                         const u32 *hpd)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        bool storm_detected = false;

        if (!hotplug_trigger)
                return;

        spin_lock(&dev_priv->irq_lock);
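
        /* Pin 0 is HPD_NONE, so start the scan at pin 1. */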
        for (i = 1; i < HPD_NUM_PINS; i++) {
                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;

                dev_priv->hpd_event_bits |= (1 << i);
                if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
                        DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
                        storm_detected = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
                }
        }

        if (storm_detected)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock(&dev_priv->irq_lock);

        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}
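
/* DP AUX completions wait on the same queue as GMBUS, hence the identical
 * wake-up here. */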
static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
                               u32 pm_iir)
{
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->rps.lock, flags);
        dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
        if (dev_priv->rps.pm_iir) {
                I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
                /* never want to mask useful interrupts. (also posting read) */
                WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
                /* TODO: if queue_work is slow, move it out of the spinlock */
                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }
        spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

        if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
                        DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
                        i915_handle_error(dev_priv->dev, false);
                }
        }
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        unsigned long irqflags;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];

        atomic_inc(&dev_priv->irq_received);
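
        /* Keep servicing interrupts until all three IIR sources read back zero. */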
        while (true) {
                iir = I915_READ(VLV_IIR);
                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                snb_gt_irq_handler(dev, dev_priv, gt_iir);

                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                                drm_handle_vblank(dev, pipe);

                        if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }
                }

                /* Consume port. Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);

                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                if (pm_iir & GEN6_PM_RPS_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);

                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                I915_WRITE(VLV_IIR, iir);
        }

out:
        return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
                               SDE_AUDIO_POWER_SHIFT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 err_int = I915_READ(GEN7_ERR_INT);

        if (err_int & ERR_INT_POISON)
                DRM_ERROR("Poison interrupt\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_A)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
                        DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_B)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
                        DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_C)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
                        DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

        I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 serr_int = I915_READ(SERR_INT);

        if (serr_int & SERR_INT_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

        I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                               SDE_AUDIO_POWER_SHIFT_CPT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK_CPT)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS_CPT)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

        if (pch_iir & SDE_FDI_MASK_CPT)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & SDE_ERROR_CPT)
                cpt_serr_int_handler(dev);
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
        int i;

        atomic_inc(&dev_priv->irq_received);

        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        if (IS_HASWELL(dev) &&
            (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be able
         * to process them after we restore SDEIER (as soon as we restore it,
         * we'll get an interrupt if SDEIIR still has something to process due
         * to its back queue). */
        if (!HAS_PCH_NOP(dev)) {
                sde_ier = I915_READ(SDEIER);
                I915_WRITE(SDEIER, 0);
                POSTING_READ(SDEIER);
        }

        /* On Haswell, also mask ERR_INT because we don't want to risk
         * generating "unclaimed register" interrupts from inside the interrupt
         * handler. */
        if (IS_HASWELL(dev)) {
                spin_lock(&dev_priv->irq_lock);
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
                spin_unlock(&dev_priv->irq_lock);
        }

        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
        }

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                if (de_iir & DE_ERR_INT_IVB)
                        ivb_err_int_handler(dev);

                if (de_iir & DE_AUX_CHANNEL_A_IVB)
                        dp_aux_irq_handler(dev);

                if (de_iir & DE_GSE_IVB)
                        intel_opregion_asle_intr(dev);
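
                /* The per-pipe vblank/flip-done bits on IVB are spaced
                 * 5 bits apart, hence the "<< (5 * i)" below. */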
                for (i = 0; i < 3; i++) {
                        if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
                                drm_handle_vblank(dev, i);
                        if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
                                intel_prepare_page_flip(dev, i);
                                intel_finish_page_flip_plane(dev, i);
                        }
                }

                /* check event from PCH */
                if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
                        u32 pch_iir = I915_READ(SDEIIR);

                        cpt_irq_handler(dev, pch_iir);

                        /* clear PCH hotplug event before clear CPU irq */
                        I915_WRITE(SDEIIR, pch_iir);
                }

                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
        }

        pm_iir = I915_READ(GEN6_PMIIR);
        if (pm_iir) {
                if (IS_HASWELL(dev))
                        hsw_pm_irq_handler(dev_priv, pm_iir);
                else if (pm_iir & GEN6_PM_RPS_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                ret = IRQ_HANDLED;
        }

        if (IS_HASWELL(dev)) {
                spin_lock(&dev_priv->irq_lock);
                if (ivb_can_enable_err_int(dev))
                        ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
                spin_unlock(&dev_priv->irq_lock);
        }

        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
        if (!HAS_PCH_NOP(dev)) {
                I915_WRITE(SDEIER, sde_ier);
                POSTING_READ(SDEIER);
        }

        return ret;
}
static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

        atomic_inc(&dev_priv->irq_received);

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be able
         * to process them after we restore SDEIER (as soon as we restore it,
         * we'll get an interrupt if SDEIIR still has something to process due
         * to its back queue). */
        sde_ier = I915_READ(SDEIER);
        I915_WRITE(SDEIER, 0);
        POSTING_READ(SDEIER);

        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
        pm_iir = I915_READ(GEN6_PMIIR);

        if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
                goto done;

        ret = IRQ_HANDLED;

        if (IS_GEN5(dev))
                ilk_gt_irq_handler(dev, dev_priv, gt_iir);
        else
                snb_gt_irq_handler(dev, dev_priv, gt_iir);

        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev);

        if (de_iir & DE_GSE)
                intel_opregion_asle_intr(dev);

        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);

        if (de_iir & DE_PIPEB_VBLANK)
                drm_handle_vblank(dev, 1);

        if (de_iir & DE_POISON)
                DRM_ERROR("Poison interrupt\n");

        if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
                        DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

        if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
                        DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
        }

        if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
                intel_finish_page_flip_plane(dev, 1);
        }

        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
                u32 pch_iir = I915_READ(SDEIIR);

                if (HAS_PCH_CPT(dev))
                        cpt_irq_handler(dev, pch_iir);
                else
                        ibx_irq_handler(dev, pch_iir);

                /* should clear PCH hotplug event before clear CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
        }

        if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
                ironlake_handle_rps_change(dev);

        if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
                gen6_queue_rps_work(dev_priv, pm_iir);

        I915_WRITE(GTIIR, gt_iir);
        I915_WRITE(DEIIR, de_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);

done:
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
        I915_WRITE(SDEIER, sde_ier);
        POSTING_READ(SDEIER);

        return ret;
}
  1142. /**
  1143. * i915_error_work_func - do process context error handling work
  1144. * @work: work struct
  1145. *
  1146. * Fire an error uevent so userspace can see that a hang or error
  1147. * was detected.
  1148. */
  1149. static void i915_error_work_func(struct work_struct *work)
  1150. {
  1151. struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
  1152. work);
  1153. drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
  1154. gpu_error);
  1155. struct drm_device *dev = dev_priv->dev;
  1156. struct intel_ring_buffer *ring;
  1157. char *error_event[] = { "ERROR=1", NULL };
  1158. char *reset_event[] = { "RESET=1", NULL };
  1159. char *reset_done_event[] = { "ERROR=0", NULL };
  1160. int i, ret;
  1161. kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
  1162. /*
  1163. * Note that there's only one work item which does gpu resets, so we
  1164. * need not worry about concurrent gpu resets potentially incrementing
  1165. * error->reset_counter twice. We only need to take care of another
  1166. * racing irq/hangcheck declaring the gpu dead for a second time. A
  1167. * quick check for that is good enough: schedule_work ensures the
  1168. * correct ordering between hang detection and this work item, and since
  1169. * the reset in-progress bit is only ever set by code outside of this
  1170. * work we don't need to worry about any other races.
  1171. */
  1172. if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
  1173. DRM_DEBUG_DRIVER("resetting chip\n");
  1174. kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
  1175. reset_event);
  1176. ret = i915_reset(dev);
  1177. if (ret == 0) {
  1178. /*
  1179. * After all the gem state is reset, increment the reset
  1180. * counter and wake up everyone waiting for the reset to
  1181. * complete.
  1182. *
  1183. * Since unlock operations are a one-sided barrier only,
  1184. * we need to insert a barrier here to order any seqno
  1185. * updates before
  1186. * the counter increment.
  1187. */
  1188. smp_mb__before_atomic_inc();
  1189. atomic_inc(&dev_priv->gpu_error.reset_counter);
  1190. kobject_uevent_env(&dev->primary->kdev.kobj,
  1191. KOBJ_CHANGE, reset_done_event);
  1192. } else {
  1193. atomic_set(&error->reset_counter, I915_WEDGED);
  1194. }
  1195. for_each_ring(ring, dev_priv, i)
  1196. wake_up_all(&ring->irq_queue);
  1197. intel_display_handle_reset(dev);
  1198. wake_up_all(&dev_priv->gpu_error.reset_queue);
  1199. }
  1200. }
  1201. /* NB: please notice the memset */
  1202. static void i915_get_extra_instdone(struct drm_device *dev,
  1203. uint32_t *instdone)
  1204. {
  1205. struct drm_i915_private *dev_priv = dev->dev_private;
  1206. memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
  1207. switch(INTEL_INFO(dev)->gen) {
  1208. case 2:
  1209. case 3:
  1210. instdone[0] = I915_READ(INSTDONE);
  1211. break;
  1212. case 4:
  1213. case 5:
  1214. case 6:
  1215. instdone[0] = I915_READ(INSTDONE_I965);
  1216. instdone[1] = I915_READ(INSTDONE1);
  1217. break;
  1218. default:
  1219. WARN_ONCE(1, "Unsupported platform\n");
  1220. case 7:
  1221. instdone[0] = I915_READ(GEN7_INSTDONE_1);
  1222. instdone[1] = I915_READ(GEN7_SC_INSTDONE);
  1223. instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
  1224. instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
  1225. break;
  1226. }
  1227. }
  1228. #ifdef CONFIG_DEBUG_FS
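
/* Error-object capture below runs in atomic (interrupt) context, hence the
 * GFP_ATOMIC allocations; each page is copied through whichever window is
 * usable for the source object: the mappable GTT aperture, the stolen
 * memory region, or a plain CPU kmap of the backing page. */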
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
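
/* Snapshot the bookkeeping for a single buffer object. Note the encoding
 * of err->pinned: 0 means unpinned, 1 pinned (pin_count > 0) and -1 pinned
 * by userspace (user_pin_count > 0). */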
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
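
/* Best-effort guess at the batch that was executing when the hang happened:
 * on chips with a broken CS TLB, trust ACTHD if it points into the object
 * stashed in ring->private; otherwise scan the active list for the oldest
 * unretired command-domain buffer belonging to this ring. */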
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}

static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
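
/* Decode the MI_SEMAPHORE_MBOX command a blocked ring is executing: IPEHR
 * holds the semaphore instruction itself, so scan back from ACTHD to find
 * the matching dword, read the seqno operand that follows it, and map the
 * mbox select bits back onto the ring that is expected to signal us. */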
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
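
/* Check whether the ring we are waiting on has already signalled, or whether
 * the wait can never complete. The per-ring hangcheck.deadlock flag makes
 * the recursion terminate if the rings form a cycle of semaphore waits; the
 * flags are cleared at the start of every hangcheck pass. */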
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return active;

	if (IS_GEN2(dev))
		return hung;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return kick;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return hung;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return kick;
		case 0:
			return wait;
		}
	}

	return hung;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of seqno progress per ring,
 * and if there is no progress the hangcheck score for that ring is
 * increased. Further, acthd is inspected to see if the ring is stuck; in
 * that case we kick the ring. If we see no progress on three subsequent
 * calls we assume the chip is wedged and try to fix it by resetting it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30
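
	/* Score increments per hangcheck pass: BUSY for activity with no
	 * forward progress, KICK when a stuck wait had to be kicked, HUNG
	 * when the ring looks dead outright. Once a ring's score exceeds
	 * FIRE it is reported hung and a GPU reset is requested. */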
	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
						  ring->name);
					wake_up_all(&ring->irq_queue);
					ring->hangcheck.score += HUNG;
				} else
					busy = false;
			} else {
				int score;

				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when it
				 * is in a legitimate wait for another ring.
				 * In that case the waiting ring is a victim
				 * and we want to be sure we catch the right
				 * culprit. Also, every time we do kick the
				 * ring, we add a small increment to the score
				 * so that we can catch a batch that is being
				 * repeatedly kicked and so responsible for
				 * stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case wait:
					score = 0;
					break;
				case active:
					score = BUSY;
					break;
				case kick:
					score = KICK;
					break;
				case hung:
					score = HUNG;
					stuck[i] = true;
					break;
				}
				ring->hangcheck.score += score;
			}
		} else {
			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_ERROR("%s on %s\n",
				  stuck[i] ? "stuck" : "no progress",
				  ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset timer in case the chip hangs without another request
		 * being added */
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies +
					   DRM_I915_HANGCHECK_JIFFIES));
}

static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	ibx_irq_preinstall(dev);
}

static void ivybridge_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* Power management */
	I915_WRITE(GEN6_PMIMR, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0x0);
	POSTING_READ(GEN6_PMIER);

	ibx_irq_preinstall(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 mask = ~I915_READ(SDEIMR);
	u32 hotplug;

	if (HAS_PCH_IBX(dev)) {
		mask &= ~SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		mask &= ~SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_cpt[intel_encoder->hpd_pin];
	}

	I915_WRITE(SDEIMR, ~mask);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
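
/* Postinstall for the PCH: ack any bits still set in SDEIIR by writing them
 * back, then unmask the wanted sources via SDEIMR. SDEIER is left alone here
 * because the interrupt handler itself saves and restores it (see the
 * comment in ibx_irq_preinstall). */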
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;

	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
			   DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
	u32 gt_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask |
			  DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT;

	if (IS_GEN6(dev))
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;

	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB |
		DE_ERR_INT_IVB;
	u32 pm_irqs = GEN6_PM_RPS_EVENTS;
	u32 gt_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an irq */
	I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
		  GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
	if (HAS_VEBOX(dev))
		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
			PM_VEBOX_CS_ERROR_INTERRUPT;

	/* Our enable/disable rps functions may touch these registers so
	 * make sure to set a known state for only the non-RPS bits.
	 * The RMW is extra paranoia since this should be called after being set
	 * to a known state in preinstall.
	 */
	I915_WRITE(GEN6_PMIMR,
		   (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
	I915_WRITE(GEN6_PMIER,
		   (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
	POSTING_READ(GEN6_PMIER);

	ibx_irq_postinstall(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 gt_irqs;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially. enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
		GT_BLT_USER_INTERRUPT;
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

  2361. static void valleyview_irq_uninstall(struct drm_device *dev)
  2362. {
  2363. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2364. int pipe;
  2365. if (!dev_priv)
  2366. return;
  2367. del_timer_sync(&dev_priv->hotplug_reenable_timer);
  2368. for_each_pipe(pipe)
  2369. I915_WRITE(PIPESTAT(pipe), 0xffff);
  2370. I915_WRITE(HWSTAM, 0xffffffff);
  2371. I915_WRITE(PORT_HOTPLUG_EN, 0);
  2372. I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2373. for_each_pipe(pipe)
  2374. I915_WRITE(PIPESTAT(pipe), 0xffff);
  2375. I915_WRITE(VLV_IIR, 0xffffffff);
  2376. I915_WRITE(VLV_IMR, 0xffffffff);
  2377. I915_WRITE(VLV_IER, 0x0);
  2378. POSTING_READ(VLV_IER);
  2379. }
  2380. static void ironlake_irq_uninstall(struct drm_device *dev)
  2381. {
  2382. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2383. if (!dev_priv)
  2384. return;
  2385. del_timer_sync(&dev_priv->hotplug_reenable_timer);
  2386. I915_WRITE(HWSTAM, 0xffffffff);
  2387. I915_WRITE(DEIMR, 0xffffffff);
  2388. I915_WRITE(DEIER, 0x0);
  2389. I915_WRITE(DEIIR, I915_READ(DEIIR));
  2390. if (IS_GEN7(dev))
  2391. I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
  2392. I915_WRITE(GTIMR, 0xffffffff);
  2393. I915_WRITE(GTIER, 0x0);
  2394. I915_WRITE(GTIIR, I915_READ(GTIIR));
  2395. if (HAS_PCH_NOP(dev))
  2396. return;
  2397. I915_WRITE(SDEIMR, 0xffffffff);
  2398. I915_WRITE(SDEIER, 0x0);
  2399. I915_WRITE(SDEIIR, I915_READ(SDEIIR));
  2400. if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
  2401. I915_WRITE(SERR_INT, I915_READ(SERR_INT));
  2402. }
  2403. static void i8xx_irq_preinstall(struct drm_device * dev)
  2404. {
  2405. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2406. int pipe;
  2407. atomic_set(&dev_priv->irq_received, 0);
  2408. for_each_pipe(pipe)
  2409. I915_WRITE(PIPESTAT(pipe), 0);
  2410. I915_WRITE16(IMR, 0xffff);
  2411. I915_WRITE16(IER, 0x0);
  2412. POSTING_READ16(IER);
  2413. }
  2414. static int i8xx_irq_postinstall(struct drm_device *dev)
  2415. {
  2416. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2417. I915_WRITE16(EMR,
  2418. ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  2419. /* Unmask the interrupts that we always want on. */
  2420. dev_priv->irq_mask =
  2421. ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2422. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2423. I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2424. I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  2425. I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2426. I915_WRITE16(IMR, dev_priv->irq_mask);
  2427. I915_WRITE16(IER,
  2428. I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2429. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2430. I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  2431. I915_USER_INTERRUPT);
  2432. POSTING_READ16(IER);
  2433. return 0;
  2434. }
  2435. /*
  2436. * Returns true when a page flip has completed.
  2437. */
  2438. static bool i8xx_handle_vblank(struct drm_device *dev,
  2439. int pipe, u16 iir)
  2440. {
  2441. drm_i915_private_t *dev_priv = dev->dev_private;
  2442. u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
  2443. if (!drm_handle_vblank(dev, pipe))
  2444. return false;
  2445. if ((iir & flip_pending) == 0)
  2446. return false;
  2447. intel_prepare_page_flip(dev, pipe);
  2448. /* We detect FlipDone by looking for the change in PendingFlip from '1'
  2449. * to '0' on the following vblank, i.e. IIR has the Pendingflip
  2450. * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  2451. * the flip is completed (no longer pending). Since this doesn't raise
  2452. * an interrupt per se, we watch for the change at vblank.
  2453. */
  2454. if (I915_READ16(ISR) & flip_pending)
  2455. return false;
  2456. intel_finish_page_flip(dev, pipe);
  2457. return true;
  2458. }
  2459. static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  2460. {
  2461. struct drm_device *dev = (struct drm_device *) arg;
  2462. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2463. u16 iir, new_iir;
  2464. u32 pipe_stats[2];
  2465. unsigned long irqflags;
  2466. int irq_received;
  2467. int pipe;
  2468. u16 flip_mask =
  2469. I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2470. I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  2471. atomic_inc(&dev_priv->irq_received);
  2472. iir = I915_READ16(IIR);
  2473. if (iir == 0)
  2474. return IRQ_NONE;
  2475. while (iir & ~flip_mask) {
  2476. /* Can't rely on pipestat interrupt bit in iir as it might
  2477. * have been cleared after the pipestat interrupt was received.
  2478. * It doesn't set the bit in iir again, but it still produces
  2479. * interrupts (for non-MSI).
  2480. */
  2481. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2482. if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  2483. i915_handle_error(dev, false);
  2484. for_each_pipe(pipe) {
  2485. int reg = PIPESTAT(pipe);
  2486. pipe_stats[pipe] = I915_READ(reg);
  2487. /*
  2488. * Clear the PIPE*STAT regs before the IIR
  2489. */
  2490. if (pipe_stats[pipe] & 0x8000ffff) {
  2491. if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  2492. DRM_DEBUG_DRIVER("pipe %c underrun\n",
  2493. pipe_name(pipe));
  2494. I915_WRITE(reg, pipe_stats[pipe]);
  2495. irq_received = 1;
  2496. }
  2497. }
  2498. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2499. I915_WRITE16(IIR, iir & ~flip_mask);
  2500. new_iir = I915_READ16(IIR); /* Flush posted writes */
  2501. i915_update_dri1_breadcrumb(dev);
  2502. if (iir & I915_USER_INTERRUPT)
  2503. notify_ring(dev, &dev_priv->ring[RCS]);
  2504. if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
  2505. i8xx_handle_vblank(dev, 0, iir))
  2506. flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
  2507. if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
  2508. i8xx_handle_vblank(dev, 1, iir))
  2509. flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
  2510. iir = new_iir;
  2511. }
  2512. return IRQ_HANDLED;
  2513. }
static void i8xx_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

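/*
 * Note the IMR/IER split relied on below: a source must be unmasked in IMR
 * for its state to show up in IIR/ISR, but it only raises an interrupt if
 * it is also enabled in IER. The plane flip-pending bits are therefore
 * unmasked in IMR without being set in IER, so the vblank handler can poll
 * their state without taking an interrupt per flip.
 */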
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

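/*
 * Gen3 top-level interrupt handler. Same IIR handshake as the gen2 path,
 * plus hotplug port servicing and the plane/pipe swap: on mobile gen3
 * parts plane A scans out on pipe B and vice versa, hence the
 * "plane = !plane" below when matching flip-pending bits to pipes.
 */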
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

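/*
 * Gen3 teardown: stop the HPD re-enable timer before touching the hotplug
 * registers so it cannot reprogram PORT_HOTPLUG_EN behind our back, then
 * quiesce PIPESTAT, IMR, IER and IIR as in the gen2 path.
 */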
static void i915_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

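/*
 * On gen4 the IER enable set is derived from the IMR unmask set instead of
 * being spelled out twice: start from ~irq_mask, strip the flip-pending
 * bits (kept visible in IIR/ISR but never interrupt-generating, as above),
 * then add the render ring user interrupt, plus the BSD ring one on G4X.
 */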
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

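/*
 * (Re)program PORT_HOTPLUG_EN from the per-pin hpd_stats state: pins
 * marked HPD_DISABLED (e.g. by the interrupt-storm detection elsewhere in
 * this file) stay off until the re-enable timer flips them back to
 * HPD_ENABLED. Must be called with irq_lock held, hence the assert.
 */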
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/*
		 * Programming the CRT detection parameters tends to generate
		 * a spurious hotplug event about three seconds later. So
		 * just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

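/*
 * Gen4 top-level interrupt handler. Structure matches the gen3 path; the
 * differences are the G4X hotplug status bits, the BSD ring user
 * interrupt, GMBUS servicing via PIPESTAT, and a 1:1 plane/pipe mapping
 * (no mobile swap needed here).
 */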
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

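/*
 * Timer callback that undoes HPD storm mitigation: any pin parked in
 * HPD_DISABLED is moved back to HPD_ENABLED, its connectors are switched
 * from polling back to hotplug interrupts, and the platform hpd_irq_setup
 * hook reprograms the hardware accordingly.
 */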
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

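/*
 * Fill in the drm_driver IRQ and vblank hooks for the detected platform.
 * For orientation only, the DRM core then roughly drives these hooks in
 * this order when the interrupt is installed (condensed sketch; the exact
 * sequence lives in the core's drm_irq_install(), not in this file):
 *
 *	dev->driver->irq_preinstall(dev);
 *	request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler, ...);
 *	dev->driver->irq_postinstall(dev);
 */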
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ivybridge_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

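/*
 * Reset hotplug state for all pins and connectors, then let the platform
 * hook reprogram the hardware. Intended to be called whenever hotplug
 * state must start fresh (e.g. driver load and resume), so that any pins
 * left disabled by storm mitigation are re-enabled.
 */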
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}