/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
	(I915_ASLE_INTERRUPT | \
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
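/*
 * Note: a set bit in DEIMR masks (disables) the corresponding interrupt,
 * so "enable" clears bits while "disable" sets them. The cached
 * dev_priv->irq_mask lets both helpers skip the MMIO write (and the
 * posting read that flushes it) when the mask already holds the
 * requested state.
 */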
static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
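/*
 * PIPESTAT packs enable bits in its high half with the matching status
 * bits sixteen positions lower, and the status bits are
 * write-one-to-clear. That is why the enable path above ORs in
 * (mask >> 16): one write both sets the enable bit and acks any status
 * that was already pending.
 */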
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
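/*
 * The low register exposes only the bottom 8 bits of the frame counter
 * (alongside the pixel counter), so the final value is assembled as
 * (high << 8) | low. The double read of the high half above is the
 * usual race guard: if the low byte wrapped between reads, the two
 * high samples differ and we retry.
 */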
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}

int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;

	return;
}
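/*
 * The Ironlake DRPS "delay" scale runs inverted: a numerically smaller
 * delay means a higher frequency, so max_delay is the smallest value.
 * That is why rising busyness decrements cur_delay (clamped at
 * max_delay) and falling busyness increments it (clamped at min_delay).
 */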
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno;

	if (ring->obj == NULL)
		return;

	seqno = ring->get_seqno(ring);
	trace_i915_gem_request_complete(dev, seqno);

	ring->irq_seqno = seqno;
	wake_up_all(&ring->irq_queue);

	dev_priv->hangcheck_count = 0;
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}

static void gen6_pm_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u8 new_delay = dev_priv->cur_delay;
	u32 pm_iir;

	pm_iir = I915_READ(GEN6_PMIIR);
	if (!pm_iir)
		return;

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->min_delay) {
			new_delay = dev_priv->min_delay;
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
				   ((new_delay << 16) & 0x3f0000));
		} else {
			/* Make sure we continue to get down interrupts
			 * until we hit the minimum frequency */
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
		}
	}

	gen6_set_rps(dev, new_delay);
	dev_priv->cur_delay = new_delay;

	I915_WRITE(GEN6_PMIIR, pm_iir);
}
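/*
 * Note the contrast with the Ironlake path above: on Gen6 the delay
 * scale is the natural one (a larger value means a higher frequency).
 * The code treats the 6-bit field at bits 21:16 of
 * GEN6_RP_INTERRUPT_LIMITS (the 0x3f0000 mask) as the down-interrupt
 * limit, pinning it to min_delay once the floor is reached.
 */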
static void pch_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 pch_iir;

	pch_iir = I915_READ(SDEIIR);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		u32 fdia, fdib;

		fdia = I915_READ(FDI_RXA_IIR);
		fdib = I915_READ(FDI_RXB_IIR);
		DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;
	struct drm_i915_master_private *master_priv;
	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

	if (IS_GEN6(dev))
		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & bsd_usr_interrupt)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		pch_irq_handler(dev);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (IS_GEN6(dev))
		gen6_pm_irq_handler(dev);

	/* The PCH hotplug event must be cleared before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
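/*
 * The handler briefly clears DE_MASTER_IRQ_CONTROL in DEIER so that no
 * new interrupt can be signalled while the IIR registers are being read
 * and acked; restoring DEIER at "done" re-arms the line, and any bits
 * that latched in the meantime raise a fresh interrupt.
 */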
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void __iomem *s;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
					     reloc_offset);
		memcpy_fromio(d, s, PAGE_SIZE);
		io_mapping_unmap_atomic(s);
		local_irq_restore(flags);

		dst->pages[page] = d;
		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}
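/*
 * Error capture can run from interrupt context, hence GFP_ATOMIC for
 * every allocation and the atomic GTT mapping (with local IRQs held
 * off) while each page is copied out: taking the snapshot must never
 * sleep.
 */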
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
		i915_error_object_free(error->batchbuffer[i]);
	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
		i915_error_object_free(error->ringbuffer[i]);
	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static u32 capture_bo_list(struct drm_i915_error_buffer *err,
			   int count,
			   struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		err->size = obj->base.size;
		err->name = obj->base.name;
		err->seqno = obj->last_rendering_seqno;
		err->gtt_offset = obj->gtt_offset;
		err->read_domains = obj->base.read_domains;
		err->write_domain = obj->base.write_domain;
		err->fence_reg = obj->fence_reg;
		err->pinned = 0;
		if (obj->pin_count > 0)
			err->pinned = 1;
		if (obj->user_pin_count > 0)
			err->pinned = -1;
		err->tiling = obj->tiling_mode;
		err->dirty = obj->dirty;
		err->purgeable = obj->madv != I915_MADV_WILLNEED;
		err->ring = obj->ring ? obj->ring->id : 0;
		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;

		if (++i == count)
			break;

		err++;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}
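/*
 * The missing break after case 3 is deliberate: gen3 parts with 16
 * fence registers read the upper eight via FENCE_REG_945_8 and then
 * fall through to case 2 to pick up the lower eight at
 * FENCE_REG_830_0.
 */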
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_DEBUG_DRIVER("generating error event\n");

	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->pipeastat = I915_READ(PIPEASTAT);
	error->pipebstat = I915_READ(PIPEBSTAT);
	error->instpm = I915_READ(INSTPM);
	error->error = 0;
	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);

		error->bcs_acthd = I915_READ(BCS_ACTHD);
		error->bcs_ipehr = I915_READ(BCS_IPEHR);
		error->bcs_ipeir = I915_READ(BCS_IPEIR);
		error->bcs_instdone = I915_READ(BCS_INSTDONE);
		error->bcs_seqno = 0;
		if (dev_priv->ring[BCS].get_seqno)
			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);

		error->vcs_acthd = I915_READ(VCS_ACTHD);
		error->vcs_ipehr = I915_READ(VCS_IPEHR);
		error->vcs_ipeir = I915_READ(VCS_IPEIR);
		error->vcs_instdone = I915_READ(VCS_INSTDONE);
		error->vcs_seqno = 0;
		if (dev_priv->ring[VCS].get_seqno)
			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
	}
	if (INTEL_INFO(dev)->gen >= 4) {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
		error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
		error->bbaddr = 0;
	}
	i915_gem_record_fences(dev, error);

	/* Record the active batch and ring buffers */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		error->batchbuffer[i] =
			i915_error_first_batchbuffer(dev_priv,
						     &dev_priv->ring[i]);

		error->ringbuffer[i] =
			i915_error_object_create(dev_priv,
						 dev_priv->ring[i].obj);
	}

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_bo_list(error->active_bo,
					error->active_bo_count,
					&dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_bo_list(error->pinned_bo,
					error->pinned_bo_count,
					&dev_priv->mm.pinned_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(dev, error);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);

	if (!eir)
		return;

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		u32 pipea_stats = I915_READ(PIPEASTAT);
		u32 pipeb_stats = I915_READ(PIPEBSTAT);

		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
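/*
 * EIR is write-one-to-clear. If any bits survive the clear above, the
 * underlying error condition is evidently still asserted, so those
 * bits are folded into EMR to suppress further reports and the
 * command-parser error bit is acked in IIR to stop the interrupt from
 * immediately re-firing.
 */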
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		wake_up_all(&dev_priv->ring[RCS].irq_queue);
		if (HAS_BSD(dev))
			wake_up_all(&dev_priv->ring[VCS].irq_queue);
		if (HAS_BLT(dev))
			wake_up_all(&dev_priv->ring[BCS].irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
	} else {
		int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitch +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	if (INTEL_INFO(dev)->gen >= 4)
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
			if (!dev_priv->flip_pending_is_done) {
				i915_pageflip_stall_check(dev, 0);
				intel_finish_page_flip(dev, 0);
			}
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
			if (!dev_priv->flip_pending_is_done) {
				i915_pageflip_stall_check(dev, 1);
				intel_finish_page_flip(dev, 1);
			}
		}

		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
static int i915_emit_irq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}
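/*
 * The four dwords emitted above form the classic "breadcrumb": the GPU
 * stores the new counter into the hardware status page at
 * I915_BREADCRUMB_INDEX, then MI_USER_INTERRUPT raises an IRQ. Checking
 * READ_BREADCRUMB() >= irq_nr (see i915_wait_irq below) then tells the
 * CPU that all commands ahead of that breadcrumb have executed.
 */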
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->trace_irq_seqno == 0 &&
	    ring->irq_get(ring))
		dev_priv->trace_irq_seqno = seqno;
}

static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static void i915_vblank_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv =
		container_of(work, drm_i915_private_t, vblank_work);

	if (atomic_read(&dev_priv->vblank_enabled)) {
		if (!dev_priv->vblank_pm_qos.pm_qos_class)
			pm_qos_add_request(&dev_priv->vblank_pm_qos,
					   PM_QOS_CPU_DMA_LATENCY,
					   15); /* >= 20 won't work */
	} else {
		if (dev_priv->vblank_pm_qos.pm_qos_class)
			pm_qos_remove_request(&dev_priv->vblank_pm_qos);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
					    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* gen3 platforms have an issue with vsync interrupts not reaching
	 * cpu during deep c-state sleep (>C1), so we need to install a
	 * PM QoS handle to prevent C-state starvation of the GPU.
	 */
	if (dev_priv->info->gen == 3 && !dev_priv->info->is_g33) {
		atomic_inc(&dev_priv->vblank_enabled);
		queue_work(dev_priv->wq, &dev_priv->vblank_work);
	}

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (dev_priv->info->gen == 3 && !dev_priv->info->is_g33) {
		atomic_dec(&dev_priv->vblank_enabled);
		queue_work(dev_priv->wq, &dev_priv->vblank_work);
	}

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
					     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed. The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering. The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
				  ring->name,
				  ring->waiting_seqno,
				  ring->get_seqno(ring));
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);

	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	if (IS_GEN6(dev) &&
	    (tmp & RING_WAIT_SEMAPHORE)) {
		DRM_ERROR("Kicking stuck semaphore on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}
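/*
 * Writing the ring control register back with the wait bit still set
 * is what "kicks" the ring: the write breaks the WAIT_FOR_EVENT (or
 * Gen6 semaphore wait) state and lets the command streamer resume, so
 * a hang caused only by a missed event does not require a full GPU
 * reset. See the hangcheck handler below for how this is used.
 */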
  1290. /**
  1291. * This is called when the chip hasn't reported back with completed
  1292. * batchbuffers in a long time. The first time this is called we simply record
  1293. * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
  1294. * again, we assume the chip is wedged and try to fix it.
  1295. */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1;
	bool err = false;

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
		dev_priv->hangcheck_count = 0;
		if (err)
			goto repeat;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		acthd = I915_READ(ACTHD);
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		acthd = I915_READ(ACTHD_I965);
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (dev_priv->hangcheck_count++ > 1) {
			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");

			if (!IS_GEN2(dev)) {
				/* Is the chip hanging on a WAIT_FOR_EVENT?
				 * If so we can simply poke the RB_WAIT bit
				 * and break the hang. This should work on
				 * all but the second generation chipsets.
				 */
				if (kick_ring(&dev_priv->ring[RCS]))
					goto repeat;

				if (HAS_BSD(dev) &&
				    kick_ring(&dev_priv->ring[VCS]))
					goto repeat;

				if (HAS_BLT(dev) &&
				    kick_ring(&dev_priv->ring[BCS]))
					goto repeat;
			}

			i915_handle_error(dev, true);
			return;
		}
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset the timer in case the chip hangs without another request
	 * being added.
	 */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}

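/* A minimal arming sketch, assuming the timer is initialized in the
 * driver load path (field and macro names match the function above):
 *
 *	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 *		    (unsigned long) dev);
 *	mod_timer(&dev_priv->hangcheck_timer,
 *		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 */
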
/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

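/* Postinstall follows the usual IIR/IMR/IER dance for each interrupt
 * domain (display, GT, south/PCH): ack anything pending by writing IIR
 * back to itself, unmask the wanted sources in IMR, enable them in IER,
 * then flush the writes with a posting read.
 */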
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts we always want enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GT_GEN6_BSD_USER_INTERRUPT |
			GT_BLT_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT;
	} else {
		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
		I915_WRITE(FDI_RXA_IMR, 0);
		I915_WRITE(FDI_RXB_IMR, 0);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

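/* Top-level preinstall: PCH-split (Ironlake and later) hardware takes
 * the dedicated path above; everything else masks and clears the GMCH
 * display and hotplug registers directly.
 */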
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);
	atomic_set(&dev_priv->vblank_enabled, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_func);

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
	if (HAS_BLT(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			 * to generate a spurious hotplug event about three
			 * seconds later. So just do it once.
			 */
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
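
	/* Ack anything still pending: PIPESTAT's status bits are
	 * write-one-to-clear, so writing back what we read (masked to
	 * 0x8000ffff so none of the enable bits get set) clears them,
	 * as does writing IIR back to itself.
	 */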
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}