i915_irq.c

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
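
/*
 * Interrupt plumbing overview (added note, not in the original file):
 * each interrupt domain on these chips exposes an enable register (IER),
 * a mask register (IMR) and an identity/status register (IIR). A bit set
 * in IMR prevents the corresponding event from appearing in IIR, so the
 * helpers below enable or disable individual display interrupts simply
 * by editing the cached irq_mask and writing it back to DEIMR.
 */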
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
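
/*
 * Background (added note): PIPESTAT registers pack the interrupt enable
 * bits in the high 16 bits and the matching status bits in the low 16.
 * Status bits are write-one-to-clear, which is why the enable path below
 * also writes the enable mask shifted down by 16: that clears any stale
 * status for the event being enabled.
 */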
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 * @dev: drm device
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
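
/*
 * Added note: on these older parts the frame counter is split across two
 * registers (high bits in PIPEFRAME, the low 8 bits sharing
 * PIPEFRAMEPIXEL with the pixel counter), hence the double-read dance
 * above. GM45 and later provide a single frame-count register that can
 * be read atomically, so the helper below just reads it directly.
 */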
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
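
/*
 * Added note: on Ironlake the "delay" values used below run opposite to
 * intuition: a smaller delay means a higher GPU frequency, so
 * ips.max_delay is numerically the smallest permitted value. That is why
 * the busy path subtracts one and then clamps with "<" against max_delay.
 */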
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
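
/*
 * Added note: unlike the Ironlake scheme above, GEN6+ RPS delay values
 * scale the usual way: a larger delay means a higher frequency. The
 * up-threshold interrupt therefore bumps cur_delay by one, and the range
 * check below compares against max_delay/min_delay directly.
 */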
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. If a bit is
	 * already set here it means we have unsafely cleared
	 * dev_priv->rps.pm_iir somewhere. Although missing an interrupt
	 * of the same type is not a problem, it indicates a bug in the
	 * masking logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
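
/*
 * Added note: Valleyview routes display, GT and PM interrupts through a
 * single handler, so the loop below keeps draining VLV_IIR, GTIIR and
 * GEN6_PMIIR until all three read back zero.
 */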
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	bool blc_event;

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
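
/*
 * Added note: the Ivybridge and Ironlake handlers below clear the
 * master-interrupt-control bit in DEIER for the duration of the handler.
 * This keeps the hardware from re-raising the interrupt line while the
 * various IIR registers are read and written back; the original DEIER
 * value is restored (with a posting read) on the way out.
 */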
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
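		/* fall through and report the gen7 values as a best guess */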
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
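		/* fall through: gen3 also records the first 8 (gen2-style)
		 * fence registers below */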
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
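
/*
 * Added note: when CONFIG_DEBUG_FS is disabled, the whole capture path
 * above compiles away and i915_capture_error_state() becomes the no-op
 * macro defined just before this point.
 */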
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 * @wedged: whether the GPU should be considered hung
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check. */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt. */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

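/*
 * Hangcheck helpers.  A ring counts as idle when its request list is empty
 * or when the hardware seqno has caught up with the last queued request;
 * in the latter case any remaining waiters are woken so that processes
 * stuck on dead hardware can notice.
 */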
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

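/*
 * If the ring control register shows the ring parked in a wait, rewriting
 * CTL pokes the RB_WAIT bit and breaks the hang (see the comment in
 * i915_hangcheck_hung() below); this works on everything but gen2.
 */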
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

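/*
 * Called on consecutive hangcheck expirations without forward progress;
 * only once the count exceeds one do we declare the GPU hung, and even
 * then successfully kicking a merely-stuck ring cancels the verdict.
 */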
static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time.  The first time this is called we simply
 * record ACTHD.  If ACTHD hasn't changed by the time the hangcheck timer
 * elapses again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->hangcheck_count = 0;
		return;
	}

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->hangcheck_count = 0;

		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
	}

repeat:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

/* drm_dma.h hooks
 */

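/*
 * The preinstall hooks run before the IRQ line is requested: they mask and
 * disable every interrupt source (and ack anything pending) so that nothing
 * can fire until the matching postinstall hook programs the real masks.
 */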
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

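/*
 * Postinstall follows the same sequence for each interrupt domain
 * (display engine, GT, south/PCH): ack any stale bits in IIR, program
 * the mask in IMR, enable in IER, then do a posting read to flush the
 * writes before the IRQ line goes live.
 */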
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}

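/*
 * Note the MSI fixup below: 0xfee00000 is the standard x86 MSI address
 * window, and the config-space writes at offsets 0x94/0x98 (apparently
 * undocumented) force a sane address and delivery mode on early VLV
 * hardware whose MSI setup is otherwise broken.
 */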
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially; the enable/disable
	 * hooks will toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

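/*
 * Gen2 interrupt handler.  Writing a bit back to IIR acks it, and the
 * re-read (new_iir) tells us whether more events arrived while we were
 * processing; the flip-pending bits are deliberately kept out of the ack
 * until the matching vblank has been seen, so we loop until everything
 * else is clear.
 */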
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}

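/*
 * Gen3 interrupt handler.  The plane = !plane swizzle below reflects the
 * mobile parts, where the driver assumes planes and pipes are cross-wired,
 * so a flip-pending IIR bit has to be matched against the other pipe's
 * vblank event.
 */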
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

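/*
 * The IER enable mask here is derived from the inverse of the default IMR
 * mask, plus the ring user interrupts, which stay enabled in IER and are
 * gated at runtime purely through IMR by the ring irq_get/irq_put code.
 */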
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	intel_opregion_enable_asle(dev);

	return 0;
}

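/*
 * Gen4 interrupt handler.  Flip-pending bits are acked immediately here
 * (unlike gen2/3); completion is driven from the vblank path, where
 * i915_pageflip_stall_check() also recovers flips whose flip-pending
 * interrupt was missed.
 */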
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

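/*
 * intel_irq_init - set up the interrupt support for this device
 *
 * Picks the platform-specific handler, (pre/post)install, uninstall and
 * vblank callbacks, and initializes the work items used to defer hotplug,
 * error, rps and L3-parity handling out of interrupt context.
 */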
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupts handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}