/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
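
/*
 * Note on the two helpers above: DEIMR is a *mask* register, so a set bit
 * disables the corresponding display interrupt. "Enable" therefore clears
 * bits and "disable" sets them, with dev_priv->irq_mask caching the current
 * hardware value. The POSTING_READ() flushes the posted MMIO write so the
 * new mask is guaranteed to be in effect when the helper returns.
 */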

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
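
/*
 * PIPESTAT packs interrupt enable bits in its high 16 bits and the matching
 * (write-1-to-clear) status bits in its low 16 bits, which is why the enable
 * path above also ORs in (mask >> 16): it acks any status that was already
 * pending for the event being enabled.
 */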

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
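
/*
 * The loop above is a classic split-counter read: if the low half rolled
 * over between the two reads of the high half, high1 != high2 and we retry,
 * so the composed (high1 << 8) | low value is always coherent.
 */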

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
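
/*
 * The DRM core interprets the flags returned above: DRM_SCANOUTPOS_VALID and
 * DRM_SCANOUTPOS_ACCURATE mark the readout as usable for precise vblank
 * timestamps, while DRM_SCANOUTPOS_INVBL reports that scanout is currently
 * inside the vblank region (with *vpos offset by -vtotal so it counts up
 * towards the start of the next frame).
 */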

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}
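
/*
 * Note the inverted scale used by the ILK IPS code above: a *smaller* delay
 * value means a higher frequency, so ips.max_delay is the numeric lower
 * bound and ips.min_delay the upper bound, which is what the clamping
 * encodes.
 */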

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
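
/*
 * A completed request counts as forward progress, so notify_ring() also
 * resets the hangcheck counter and pushes the hangcheck timer out by
 * another DRM_I915_HANGCHECK_JIFFIES.
 */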

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
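
/*
 * This work item is the bottom half of the RPS interrupt: the IRQ path
 * (gen6_queue_rps_work() below) masks further PM interrupts and stashes the
 * IIR bits in rps.pm_iir; here, in process context, we unmask again
 * (GEN6_PMIMR = 0), pick a new frequency step from the up/down threshold
 * events and program it under rps.hw_lock.
 */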

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
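
/*
 * The parity interrupt is masked here (bit set in GTIMR) so it cannot
 * re-fire while the error is being processed; ivybridge_parity_work()
 * unmasks it again once GEN7_L3CDERRST1 has been read and re-armed.
 */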

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
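
/*
 * Ordering matters in the loop above: the secondary status registers
 * (PIPESTAT, PORT_HOTPLUG_STAT) are read and cleared first, and only then
 * are the top-level IIR registers acked, otherwise an event arriving in
 * between could be lost. The loop then re-reads the IIRs until all three
 * report idle.
 */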

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
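
/*
 * The DEIER dance above is the standard bracket on PCH-split platforms:
 * clearing DE_MASTER_IRQ_CONTROL holds off further interrupts while the
 * individual IIR registers are serviced, and restoring DEIER at the end
 * lets the line re-assert if new bits were latched in the meantime.
 * ironlake_irq_handler() below uses the same pattern.
 */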

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
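
/*
 * The fall-through from default into case 7 above is deliberate: an unknown
 * future platform triggers the WARN_ONCE but still gets the gen7-style
 * readout as a best effort, and the memset guarantees that any unread slots
 * stay zero.
 */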

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
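
/*
 * Three capture paths above, chosen per page: through the GTT aperture with
 * an atomic WC io-mapping when the object has a global GTT mapping inside
 * the mappable range, directly from stolen memory, or via kmap_atomic() of
 * the backing page with clflushes around the copy so we snapshot what the
 * GPU actually saw. Everything uses GFP_ATOMIC because error capture can
 * run from interrupt context.
 */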

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	default:
		BUG();
	}
}
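
/*
 * The missing break after case 3 above is intentional: 945G/GM and G33 have
 * 16 fence registers, so the gen3 arm captures the upper 8 and then falls
 * through to the gen2 arm for the common lower 8.
 */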

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count * sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo) * i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
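
/*
 * Heuristic stall check: if the display base register already points at
 * the object of the pending flip, the flip completed but its interrupt
 * was presumably lost, so nudge the flip state machine along by hand.
 */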
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
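
/* Seqno of the most recently added request on the given ring. */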
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}
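
/*
 * A ring is idle for hangcheck purposes once every queued request has
 * had its seqno reported back. If waiters are still sleeping on an idle
 * ring, treat it as a likely missed interrupt: wake them and flag the
 * error for the caller.
 */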
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}
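
/*
 * If the ring is stuck waiting on WAIT_FOR_EVENT, rewriting the ring
 * CTL register pokes the RB_WAIT bit and lets the ring continue.
 */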
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);

	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}
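
/*
 * Called each time hangcheck fires without forward progress; only after
 * repeated strikes do we report the hang and try kicking the rings loose.
 */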
static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->gpu_error.hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->gpu_error.hangcheck_count = 0;
		return;
	}

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
		   sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->gpu_error.prev_instdone, instdone,
		   sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->gpu_error.hangcheck_count = 0;

		memcpy(dev_priv->gpu_error.last_acthd, acthd,
		       sizeof(acthd));
		memcpy(dev_priv->gpu_error.prev_instdone, instdone,
		       sizeof(instdone));
	}

repeat:
	/* Reset the timer in case the chip hangs without another request
	 * being added */
	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}
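
/* Mask and ack everything before the real install sequence runs. */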
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ibx_enable_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
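
/*
 * Unmask the south display (PCH) interrupts we care about: hotplug,
 * GMBUS and AUX, using the IBX or CPT variants of the masks.
 */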
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_IBX(dev))
		mask = SDE_HOTPLUG_MASK |
		       SDE_GMBUS |
		       SDE_AUX_MASK;
	else
		mask = SDE_HOTPLUG_MASK_CPT |
		       SDE_GMBUS_CPT |
		       SDE_AUX_MASK_CPT;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
	I915_WRITE(SDEIER, mask);
	POSTING_READ(SDEIER);

	ibx_enable_hotplug(dev);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially. enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTD_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	intel_opregion_enable_asle(dev);

	return 0;
}
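
/* Enable hotplug detection only for the ports flagged as supported. */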
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTD_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	intel_opregion_enable_asle(dev);

	return 0;
}

static void i965_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTD_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later. So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
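
/*
 * One-time IRQ setup: initialise the work items and the hangcheck timer,
 * then point the drm_driver vtable at the generation-specific install,
 * uninstall, handler and vblank hooks.
 */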
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
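
/* (Re)program hotplug detection; a no-op on platforms without a hook. */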
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}