/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)
/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT |                 \
                                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
                                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
                                 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
                                 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
                                  DRM_I915_VBLANK_PIPE_B)
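
/*
 * The helpers below keep a software copy of each interrupt mask register
 * (GTIMR/DEIMR on IGDNG, IMR otherwise) in dev_priv and only touch the
 * hardware when the requested bits actually change.  The read that follows
 * each write flushes the posted register write.
 */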
void
igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
                dev_priv->gt_irq_mask_reg &= ~mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
                (void) I915_READ(GTIMR);
        }
}

static inline void
igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
                dev_priv->gt_irq_mask_reg |= mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
                (void) I915_READ(GTIMR);
        }
}

/* For display hotplug interrupt */
void
igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask_reg & mask) != 0) {
                dev_priv->irq_mask_reg &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
                (void) I915_READ(DEIMR);
        }
}

static inline void
igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask_reg & mask) != mask) {
                dev_priv->irq_mask_reg |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
                (void) I915_READ(DEIMR);
        }
}

void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask_reg & mask) != 0) {
                dev_priv->irq_mask_reg &= ~mask;
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
                (void) I915_READ(IMR);
        }
}

static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask_reg & mask) != mask) {
                dev_priv->irq_mask_reg |= mask;
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
                (void) I915_READ(IMR);
        }
}
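
/*
 * The PIPEASTAT/PIPEBSTAT registers carry their interrupt enable bits in the
 * high word and the corresponding status bits sixteen bits lower, which is
 * why i915_enable_pipestat() writes "mask >> 16" alongside the new enables to
 * ack any status that was already pending.
 */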
static inline u32
i915_pipestat(int pipe)
{
        if (pipe == 0)
                return PIPEASTAT;
        if (pipe == 1)
                return PIPEBSTAT;
        BUG();
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != mask) {
                u32 reg = i915_pipestat(pipe);

                dev_priv->pipestat[pipe] |= mask;
                /* Enable the interrupt, clear any pending status */
                I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
                (void) I915_READ(reg);
        }
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != 0) {
                u32 reg = i915_pipestat(pipe);

                dev_priv->pipestat[pipe] &= ~mask;
                I915_WRITE(reg, dev_priv->pipestat[pipe]);
                (void) I915_READ(reg);
        }
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

        if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
                return 1;

        return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, count;

        high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
        low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
                return 0;
        }

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
                         PIPE_FRAME_HIGH_SHIFT);
                low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
                       PIPE_FRAME_LOW_SHIFT);
                high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
                         PIPE_FRAME_HIGH_SHIFT);
        } while (high1 != high2);

        count = (high1 << 8) | low;

        return count;
}
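
/*
 * GM45/G4X-class chips expose a dedicated per-pipe hardware frame counter
 * register, so unlike i915_get_vblank_counter() above there is no need to
 * stitch the count together from separate high/low fields.
 */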
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
                return 0;
        }

        return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;

        if (mode_config->num_connector) {
                list_for_each_entry(connector, &mode_config->connector_list, head) {
                        struct intel_output *intel_output = to_intel_output(connector);

                        if (intel_output->hot_plug)
                                (*intel_output->hot_plug) (intel_output);
                }
        }

        /* Just fire off a uevent and let userspace tell us what to do */
        drm_sysfs_hotplug_event(dev);
}
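
/*
 * IGDNG interrupt handler: display engine and render interrupts arrive in
 * DEIIR and GTIIR respectively.  Each pass acks what was read by writing it
 * back, then the loop repeats until both registers read back as zero.
 */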
irqreturn_t igdng_irq_handler(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir;
        u32 new_de_iir, new_gt_iir;
        struct drm_i915_master_private *master_priv;

        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);

        for (;;) {
                if (de_iir == 0 && gt_iir == 0)
                        break;

                ret = IRQ_HANDLED;

                I915_WRITE(DEIIR, de_iir);
                new_de_iir = I915_READ(DEIIR);
                I915_WRITE(GTIIR, gt_iir);
                new_gt_iir = I915_READ(GTIIR);

                if (dev->primary->master) {
                        master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->last_dispatch =
                                        READ_BREADCRUMB(dev_priv);
                }

                if (gt_iir & GT_USER_INTERRUPT) {
                        dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
                        DRM_WAKEUP(&dev_priv->irq_queue);
                }

                de_iir = new_de_iir;
                gt_iir = new_gt_iir;
        }

        return ret;
}
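
/*
 * Main interrupt handler for non-IGDNG chips.  Pipe events are gathered from
 * (and acked in) PIPEASTAT/PIPEBSTAT before IIR is cleared, user interrupts
 * wake anyone sleeping on irq_queue, and the loop continues with the re-read
 * IIR value so that bits raised while handling are not lost.
 */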
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv;
        u32 iir, new_iir;
        u32 pipea_stats, pipeb_stats;
        u32 vblank_status;
        u32 vblank_enable;
        int vblank = 0;
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE;

        atomic_inc(&dev_priv->irq_received);

        if (IS_IGDNG(dev))
                return igdng_irq_handler(dev);

        iir = I915_READ(IIR);

        if (IS_I965G(dev)) {
                vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
                vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
        } else {
                vblank_status = I915_VBLANK_INTERRUPT_STATUS;
                vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
        }

        for (;;) {
                irq_received = iir != 0;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
                pipea_stats = I915_READ(PIPEASTAT);
                pipeb_stats = I915_READ(PIPEBSTAT);

                /*
                 * Clear the PIPE(A|B)STAT regs before the IIR
                 */
                if (pipea_stats & 0x8000ffff) {
                        I915_WRITE(PIPEASTAT, pipea_stats);
                        irq_received = 1;
                }

                if (pipeb_stats & 0x8000ffff) {
                        I915_WRITE(PIPEBSTAT, pipeb_stats);
                        irq_received = 1;
                }
                spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

                if (!irq_received)
                        break;

                ret = IRQ_HANDLED;

                /* Consume port. Then clear IIR or we'll miss events */
                if ((I915_HAS_HOTPLUG(dev)) &&
                    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

                        DRM_DEBUG("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
                        if (hotplug_status & dev_priv->hotplug_supported_mask)
                                schedule_work(&dev_priv->hotplug_work);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                I915_WRITE(IIR, iir);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (dev->primary->master) {
                        master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->last_dispatch =
                                        READ_BREADCRUMB(dev_priv);
                }

                if (iir & I915_USER_INTERRUPT) {
                        dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
                        DRM_WAKEUP(&dev_priv->irq_queue);
                }

                if (pipea_stats & vblank_status) {
                        vblank++;
                        drm_handle_vblank(dev, 0);
                }

                if (pipeb_stats & vblank_status) {
                        vblank++;
                        drm_handle_vblank(dev, 1);
                }

                if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
                    (iir & I915_ASLE_INTERRUPT))
                        opregion_asle_intr(dev);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero. If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read. This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                iir = new_iir;
        }

        return ret;
}
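
/*
 * Emit a breadcrumb into the ring: store the incremented counter at
 * I915_BREADCRUMB_INDEX in the hardware status page, then request a user
 * interrupt.  The returned value is what i915_wait_irq() later compares
 * against READ_BREADCRUMB().
 */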
static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        RING_LOCALS;

        i915_kernel_lost_context(dev);

        DRM_DEBUG("\n");

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 1;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();

        return dev_priv->counter;
}
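
/*
 * The user interrupt is reference counted: the first i915_user_irq_get()
 * unmasks it (in GTIMR on IGDNG, IMR otherwise) and the last
 * i915_user_irq_put() masks it again, all under user_irq_lock.
 */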
void i915_user_irq_get(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
                if (IS_IGDNG(dev))
                        igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_user_irq_put(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
                if (IS_IGDNG(dev))
                        igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
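
/*
 * Block until the breadcrumb in the status page reaches irq_nr, keeping the
 * user interrupt enabled for the duration and giving up after 3 * DRM_HZ.
 */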
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;

        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        i915_user_irq_get(dev);
        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);
        i915_user_irq_put(dev);

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        return ret;
}
/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (!dev_priv || !dev_priv->ring.virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
        u32 pipeconf;

        pipeconf = I915_READ(pipeconf_reg);
        if (!(pipeconf & PIPEACONF_ENABLE))
                return -EINVAL;

        if (IS_IGDNG(dev))
                return 0;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (IS_I965G(dev))
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

        return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        if (IS_IGDNG(dev))
                return;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_VBLANK_INTERRUPT_ENABLE |
                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
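
/*
 * Mark interrupts as enabled for the driver; on non-IGDNG hardware this also
 * turns on ASLE notifications via the opregion.
 */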
void i915_enable_interrupt (struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!IS_IGDNG(dev))
                opregion_enable_asle(dev);
        dev_priv->irq_enabled = 1;
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}
/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed. The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering. The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}
/* drm_dma.h hooks
 */
static void igdng_irq_preinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        I915_WRITE(HWSTAM, 0xeffe);

        /* XXX hotplug from PCH */

        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        (void) I915_READ(DEIER);

        /* and GT */
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        (void) I915_READ(GTIER);
}
static int igdng_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable the kinds of interrupts that are always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
        u32 render_mask = GT_USER_INTERRUPT;

        dev_priv->irq_mask_reg = ~display_mask;
        dev_priv->de_irq_enable_reg = display_mask;

        /* these should always be able to generate an irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
        I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
        (void) I915_READ(DEIER);

        /* the user interrupt is enabled in GTIER but masked initially in GTIMR */
        dev_priv->gt_irq_mask_reg = 0xffffffff;
        dev_priv->gt_irq_enable_reg = render_mask;

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
        I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
        (void) I915_READ(GTIER);

        return 0;
}
void i915_driver_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        atomic_set(&dev_priv->irq_received, 0);

        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);

        if (IS_IGDNG(dev)) {
                igdng_irq_preinstall(dev);
                return;
        }

        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        I915_WRITE(HWSTAM, 0xeffe);
        I915_WRITE(PIPEASTAT, 0);
        I915_WRITE(PIPEBSTAT, 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
        (void) I915_READ(IER);
}
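
/*
 * Post-install: unmask the always-on interrupts (I915_INTERRUPT_ENABLE_FIX),
 * program hotplug detection where the hardware supports it, clear any stale
 * pipe and IIR status, and finally write IER/IMR so interrupt delivery can
 * begin.
 */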
int i915_driver_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;

        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        if (IS_IGDNG(dev))
                return igdng_irq_postinstall(dev);

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;

        if (I915_HAS_HOTPLUG(dev)) {
                u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

                /* Leave other bits alone */
                hotplug_en |= HOTPLUG_EN_MASK;
                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

                dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
                        TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
                        SDVOB_HOTPLUG_INT_STATUS;
                if (IS_G4X(dev)) {
                        dev_priv->hotplug_supported_mask |=
                                HDMIB_HOTPLUG_INT_STATUS |
                                HDMIC_HOTPLUG_INT_STATUS |
                                HDMID_HOTPLUG_INT_STATUS;
                }
                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
                i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
        }

        /* Disable pipe interrupt enables, clear pending pipe status */
        I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
        I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
        /* Clear pending interrupt status */
        I915_WRITE(IIR, I915_READ(IIR));

        I915_WRITE(IER, enable_mask);
        I915_WRITE(IMR, dev_priv->irq_mask_reg);
        (void) I915_READ(IER);

        opregion_enable_asle(dev);

        return 0;
}
static void igdng_irq_uninstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        I915_WRITE(HWSTAM, 0xffffffff);

        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        I915_WRITE(DEIIR, I915_READ(DEIIR));

        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        I915_WRITE(GTIIR, I915_READ(GTIIR));
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!dev_priv)
                return;

        dev_priv->vblank_pipe = 0;

        if (IS_IGDNG(dev)) {
                igdng_irq_uninstall(dev);
                return;
        }

        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        I915_WRITE(HWSTAM, 0xffffffff);
        I915_WRITE(PIPEASTAT, 0);
        I915_WRITE(PIPEBSTAT, 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
        I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
}