/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
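
/*
 * PIPESTAT registers pack the interrupt enable bits in the high 16 bits
 * and the corresponding write-one-to-clear status bits in the low 16
 * bits, which is why enabling shifts the enable mask down by 16 to ack
 * any status that is already pending.
 */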
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
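
/*
 * Returns the current scanout position; *vpos is made negative (by
 * subtracting vtotal) while the beam is inside the vblank region, and
 * the DRM_SCANOUTPOS_* flags in the return value tell the caller how
 * trustworthy the readout is.
 */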
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
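
/*
 * RPS (render frequency scaling) bottom half: snapshots the deferred PM
 * interrupt bits under rps.lock, unmasks them again in GEN6_PMIMR, and
 * then steps the GPU frequency up or down one notch depending on which
 * threshold interrupt fired.
 */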
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */
	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
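
/*
 * Top-level interrupt handler for Valleyview: loops until VLV_IIR, GTIIR
 * and GEN6_PMIIR all read back zero so that events raised while earlier
 * ones were being serviced are not lost, and acks each IIR only after
 * the sources behind it have been consumed.
 */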
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
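
/*
 * CougarPoint/PantherPoint PCHs rearrange the south display engine
 * interrupt bits, hence the parallel _CPT variants of the masks below.
 */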
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clearing CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
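
/*
 * Ironlake and Sandybridge share this handler: the master interrupt is
 * disabled in DEIER while the individual IIR registers are read and
 * acked, then re-enabled at the end so a fresh edge can be generated
 * for anything that arrived in between.
 */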
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clearing CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
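		/* deliberate fall through: report the gen7 set as a best guess */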
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
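
/*
 * Error state capture below is only built when debugfs is enabled, since
 * debugfs is the only way the captured record reaches userspace; without
 * it, i915_capture_error_state() compiles away to nothing (see the #else
 * branch at the end of this block).
 */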
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
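		/* fall through: gen3 also has the eight gen2-style fences */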
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	default:
		BUG();
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
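
/*
 * Walks each ring's outstanding request list twice: once to size the
 * snapshot array (allocated GFP_ATOMIC since capture can run from
 * interrupt context) and once to copy out the seqno, jiffies and tail
 * of every request.
 */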
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
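
/*
 * EIR is a sticky error-status register: each bit is acked by writing it
 * back, and anything that refuses to clear afterwards gets masked off in
 * EMR so it cannot storm the error interrupt.
 */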
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 * @wedged: whether the GPU should be considered hung and a reset scheduled
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}

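/*
 * Called on vblank while a flip is pending: if the display base register
 * already points at the new object, the flip-done interrupt was apparently
 * missed, so complete the flip manually.
 */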
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

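/*
 * Hangcheck helpers: a ring counts as idle when its request list is empty
 * or the hardware seqno has caught up with the last queued request; an
 * idle ring that still has waiters suggests a missed interrupt, which is
 * reported back through *err.
 */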
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

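/*
 * If the ring control register has RING_WAIT set, the ring is stuck on a
 * WAIT_FOR_EVENT; writing the value back pokes the wait bit and lets the
 * ring continue (see the comment in i915_hangcheck_hung() below).
 */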
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

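/*
 * Called by hangcheck when no progress is being made: once the hangcheck
 * counter has ticked past one, report the error and, on gen3+, try to kick
 * rings stuck on a WAIT_FOR_EVENT. Returns true only if nothing could be
 * kicked, i.e. the hang is real.
 */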
static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->gpu_error.hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->gpu_error.hangcheck_count = 0;
		return;
	}

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
		   sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->gpu_error.prev_instdone, instdone,
		   sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->gpu_error.hangcheck_count = 0;

		memcpy(dev_priv->gpu_error.last_acthd, acthd,
		       sizeof(acthd));
		memcpy(dev_priv->gpu_error.prev_instdone, instdone,
		       sizeof(instdone));
	}

repeat:
	/* Reset timer in case the chip hangs without another request being added */
	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

/* drm_dma.h hooks */

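/*
 * The *_irq_preinstall hooks below quiesce the hardware (mask all sources
 * and ack stale status where needed) before the handler is installed; the
 * matching *_irq_postinstall hooks then unmask what the driver wants.
 */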
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A;
	u32 render_irqs;
	u32 hotplug_mask;
	u32 pch_irq_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT |
				SDE_GMBUS_CPT |
				SDE_AUX_MASK_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_GMBUS |
				SDE_AUX_MASK);
	}

	pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB;
	u32 render_irqs;
	u32 hotplug_mask;
	u32 pch_irq_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT |
			SDE_GMBUS_CPT |
			SDE_AUX_MASK_CPT);
	pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}

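/*
 * VLV display interrupts go through VLV_IER/IIR/IMR with a separate master
 * enable in VLV_MASTER_IER; vblank bits start out masked here and are
 * toggled at runtime by the enable/disable_vblank hooks.
 */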
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

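/*
 * Gen2 exposes 16-bit IER/IIR/IMR/EMR, hence the I915_READ16/I915_WRITE16
 * accessors and u16 masks used throughout the i8xx handler below.
 */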
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	intel_opregion_enable_asle(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

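/*
 * Gen3 interrupt handler: loop until no unmasked IIR bits remain, each pass
 * sampling and acking PIPESTAT before writing IIR (see the MSI note at the
 * bottom of the loop). Flip-pending bits are dropped from the loop mask
 * once the corresponding flip has completed.
 */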
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	intel_opregion_enable_asle(dev);

	return 0;
}

static void i965_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

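/*
 * Gen4/G4X interrupt handler: same IIR loop structure as i915_irq_handler(),
 * plus BSD ring notification, GMBUS events and the pageflip stall check on
 * vblank.
 */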
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

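/**
 * intel_irq_init - initialize interrupt support
 * @dev: drm device
 *
 * Set up the hotplug, GPU error, RPS and L3 parity work items and the
 * hangcheck timer, then select the vblank counter/timestamp callbacks and
 * the generation-specific irq handler, (pre/post/un)install and vblank
 * hooks for this device.
 */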
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

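/**
 * intel_hpd_init - initialize hotplug detection
 * @dev: drm device
 *
 * Invoke the generation-specific hpd_irq_setup hook, if one was selected
 * in intel_irq_init(), to enable the appropriate hotplug interrupt sources.
 */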
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}