/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)

#define MAX_NOPID ((u32)~0)

/**
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held.
 */
static void i915_vblank_tasklet(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        struct list_head *list, *tmp, hits, *hit;
        int nhits, nrects, slice[2], upper[2], lower[2], i;
        unsigned counter[2] = { atomic_read(&dev->vbl_received),
                                atomic_read(&dev->vbl_received2) };
        struct drm_drawable_info *drw;
        drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
        u32 cpp = dev_priv->cpp;
        u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
                                XY_SRC_COPY_BLT_WRITE_ALPHA |
                                XY_SRC_COPY_BLT_WRITE_RGB)
                             : XY_SRC_COPY_BLT_CMD;
        u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
                          (cpp << 23) | (1 << 24);
        RING_LOCALS;

        DRM_DEBUG("\n");

        INIT_LIST_HEAD(&hits);

        nhits = nrects = 0;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        /* Find buffer swaps scheduled for this vertical blank */
        list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
                drm_i915_vbl_swap_t *vbl_swap =
                        list_entry(list, drm_i915_vbl_swap_t, head);

                if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
                        continue;

                list_del(list);
                dev_priv->swaps_pending--;

                spin_unlock(&dev_priv->swaps_lock);
                spin_lock(&dev->drw_lock);

                drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

                if (!drw) {
                        spin_unlock(&dev->drw_lock);
                        drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
                        spin_lock(&dev_priv->swaps_lock);
                        continue;
                }

                /* Insert into the list of hits, sorted by the top edge of
                 * the drawable's first clip rect.
                 */
                list_for_each(hit, &hits) {
                        drm_i915_vbl_swap_t *swap_cmp =
                                list_entry(hit, drm_i915_vbl_swap_t, head);
                        struct drm_drawable_info *drw_cmp =
                                drm_get_drawable_info(dev, swap_cmp->drw_id);

                        if (drw_cmp &&
                            drw_cmp->rects[0].y1 > drw->rects[0].y1) {
                                list_add_tail(list, hit);
                                break;
                        }
                }

                spin_unlock(&dev->drw_lock);

                /* List of hits was empty, or we reached the end of it */
                if (hit == &hits)
                        list_add_tail(list, hits.prev);

                nhits++;

                spin_lock(&dev_priv->swaps_lock);
        }

        if (nhits == 0) {
                spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                return;
        }

        spin_unlock(&dev_priv->swaps_lock);

        i915_kernel_lost_context(dev);

        if (IS_I965G(dev)) {
                BEGIN_LP_RING(4);

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING(0);
                OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
                OUT_RING(0);

                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(6);

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(0);
                OUT_RING(0);
                OUT_RING(sarea_priv->width | sarea_priv->height << 16);
                OUT_RING(sarea_priv->width | sarea_priv->height << 16);
                OUT_RING(0);

                ADVANCE_LP_RING();
        }

        sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;

        upper[0] = upper[1] = 0;
        slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
        slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
        lower[0] = sarea_priv->pipeA_y + slice[0];
        lower[1] = sarea_priv->pipeB_y + slice[1];

        spin_lock(&dev->drw_lock);

        /* Emit blits for buffer swaps, partitioning both outputs into as many
         * slices as there are buffer swaps scheduled in order to avoid tearing
         * (based on the assumption that a single buffer swap would always
         * complete before scanout starts).
         */
        for (i = 0; i++ < nhits;
             upper[0] = lower[0], lower[0] += slice[0],
             upper[1] = lower[1], lower[1] += slice[1]) {
                if (i == nhits)
                        lower[0] = lower[1] = sarea_priv->height;

                list_for_each(hit, &hits) {
                        drm_i915_vbl_swap_t *swap_hit =
                                list_entry(hit, drm_i915_vbl_swap_t, head);
                        struct drm_clip_rect *rect;
                        int num_rects, pipe;
                        unsigned short top, bottom;

                        drw = drm_get_drawable_info(dev, swap_hit->drw_id);

                        if (!drw)
                                continue;

                        rect = drw->rects;
                        pipe = swap_hit->pipe;
                        top = upper[pipe];
                        bottom = lower[pipe];

                        for (num_rects = drw->num_rects; num_rects--; rect++) {
                                int y1 = max(rect->y1, top);
                                int y2 = min(rect->y2, bottom);

                                if (y1 >= y2)
                                        continue;

                                BEGIN_LP_RING(8);

                                OUT_RING(cmd);
                                OUT_RING(pitchropcpp);
                                OUT_RING((y1 << 16) | rect->x1);
                                OUT_RING((y2 << 16) | rect->x2);
                                OUT_RING(sarea_priv->front_offset);
                                OUT_RING((y1 << 16) | rect->x1);
                                OUT_RING(pitchropcpp & 0xffff);
                                OUT_RING(sarea_priv->back_offset);

                                ADVANCE_LP_RING();
                        }
                }
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        list_for_each_safe(hit, tmp, &hits) {
                drm_i915_vbl_swap_t *swap_hit =
                        list_entry(hit, drm_i915_vbl_swap_t, head);

                list_del(hit);

                drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
        }
}
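
/**
 * Main interrupt handler: acknowledges the user interrupt and vblank
 * sources, updates the vblank counters, wakes up waiters and schedules
 * the vblank tasklet when buffer swaps are pending.
 */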
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 temp;
        u32 pipea_stats, pipeb_stats;

        pipea_stats = I915_READ(I915REG_PIPEASTAT);
        pipeb_stats = I915_READ(I915REG_PIPEBSTAT);

        temp = I915_READ16(I915REG_INT_IDENTITY_R);

        temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);

        DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);

        if (temp == 0)
                return IRQ_NONE;

        I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
        (void) I915_READ16(I915REG_INT_IDENTITY_R);
        DRM_READMEMORYBARRIER();

        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

        if (temp & USER_INT_FLAG)
                DRM_WAKEUP(&dev_priv->irq_queue);

        if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
                int vblank_pipe = dev_priv->vblank_pipe;

                if ((vblank_pipe &
                     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
                    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
                        if (temp & VSYNC_PIPEA_FLAG)
                                atomic_inc(&dev->vbl_received);
                        if (temp & VSYNC_PIPEB_FLAG)
                                atomic_inc(&dev->vbl_received2);
                } else if (((temp & VSYNC_PIPEA_FLAG) &&
                            (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
                           ((temp & VSYNC_PIPEB_FLAG) &&
                            (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
                        atomic_inc(&dev->vbl_received);

                DRM_WAKEUP(&dev->vbl_queue);
                drm_vbl_send_signals(dev);

                if (dev_priv->swaps_pending > 0)
                        drm_locked_tasklet(dev, i915_vblank_tasklet);
                I915_WRITE(I915REG_PIPEASTAT,
                           pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
                           I915_VBLANK_CLEAR);
                I915_WRITE(I915REG_PIPEBSTAT,
                           pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
                           I915_VBLANK_CLEAR);
        }

        return IRQ_HANDLED;
}
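
/* Emit a breadcrumb store followed by a user interrupt into the ring and
 * return the new sequence number, which callers can later wait for with
 * i915_wait_irq().  Must be called with the hardware lock held since it
 * touches the ring.
 */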
static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        i915_kernel_lost_context(dev);

        DRM_DEBUG("\n");

        dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

        BEGIN_LP_RING(6);
        OUT_RING(CMD_STORE_DWORD_IDX);
        OUT_RING(20);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        OUT_RING(0);
        OUT_RING(GFX_OP_USER_INTERRUPT);
        ADVANCE_LP_RING();

        return dev_priv->counter;
}
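
/* Sleep on the driver's IRQ wait queue (up to 3 seconds) until the
 * breadcrumb read back from the hardware reaches the given sequence number.
 */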
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = 0;

        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr)
                return 0;

        dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
        return ret;
}
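
/* Common helper for the vblank wait entry points: sleep on the vblank wait
 * queue (up to 3 seconds) until the given counter reaches the requested
 * sequence number, allowing for wrap-around.
 */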
static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
                                      atomic_t *counter)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int cur_vblank;
        int ret = 0;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
                    (((cur_vblank = atomic_read(counter))
                      - *sequence) <= (1<<23)));

        *sequence = cur_vblank;

        return ret;
}

int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
        return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}

int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
        return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        result = i915_emit_irq(dev);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}
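
/* Program the interrupt enable register for user interrupts plus the
 * vblank interrupts selected by dev_priv->vblank_pipe.
 */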
static void i915_enable_interrupt (struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 flag;

        flag = 0;
        if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
                flag |= VSYNC_PIPEA_FLAG;
        if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
                flag |= VSYNC_PIPEB_FLAG;

        I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
                DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
                return -EINVAL;
        }

        dev_priv->vblank_pipe = pipe->pipe;

        i915_enable_interrupt (dev);

        return 0;
}
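
/* Report which pipes currently have vblank interrupts enabled, based on
 * the interrupt enable register.
 */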
int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;
        u16 flag;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        flag = I915_READ(I915REG_INT_ENABLE_R);
        pipe->pipe = 0;
        if (flag & VSYNC_PIPEA_FLAG)
                pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
        if (flag & VSYNC_PIPEB_FLAG)
                pipe->pipe |= DRM_I915_VBLANK_PIPE_B;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_swap_t *swap = data;
        drm_i915_vbl_swap_t *vbl_swap;
        unsigned int pipe, seqtype, curseq;
        unsigned long irqflags;
        struct list_head *list;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __func__);
                return -EINVAL;
        }

        if (dev_priv->sarea_priv->rotation) {
                DRM_DEBUG("Rotation not supported\n");
                return -EINVAL;
        }

        if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
                              _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
                DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
                return -EINVAL;
        }

        pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

        seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

        if (!(dev_priv->vblank_pipe & (1 << pipe))) {
                DRM_ERROR("Invalid pipe %d\n", pipe);
                return -EINVAL;
        }

        spin_lock_irqsave(&dev->drw_lock, irqflags);

        if (!drm_get_drawable_info(dev, swap->drawable)) {
                spin_unlock_irqrestore(&dev->drw_lock, irqflags);
                DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
                return -EINVAL;
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);

        if (seqtype == _DRM_VBLANK_RELATIVE)
                swap->sequence += curseq;

        if ((curseq - swap->sequence) <= (1<<23)) {
                if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
                        swap->sequence = curseq + 1;
                } else {
                        DRM_DEBUG("Missed target sequence\n");
                        return -EINVAL;
                }
        }

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        list_for_each(list, &dev_priv->vbl_swaps.head) {
                vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

                if (vbl_swap->drw_id == swap->drawable &&
                    vbl_swap->pipe == pipe &&
                    vbl_swap->sequence == swap->sequence) {
                        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                        DRM_DEBUG("Already scheduled\n");
                        return 0;
                }
        }

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        if (dev_priv->swaps_pending >= 100) {
                DRM_DEBUG("Too many swaps queued\n");
                return -EBUSY;
        }

        vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

        if (!vbl_swap) {
                DRM_ERROR("Failed to allocate memory to queue swap\n");
                return -ENOMEM;
        }

        DRM_DEBUG("\n");

        vbl_swap->drw_id = swap->drawable;
        vbl_swap->pipe = pipe;
        vbl_swap->sequence = swap->sequence;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);

        dev_priv->swaps_pending++;

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        return 0;
}

/* drm_dma.h hooks
 */
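
/* Disable all interrupt sources before the IRQ handler is installed. */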
void i915_driver_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        I915_WRITE16(I915REG_HWSTAM, 0xfffe);
        I915_WRITE16(I915REG_INT_MASK_R, 0x0);
        I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}
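
/* Called once the IRQ handler is installed: set up the swap queue and its
 * lock, enable the configured vblank and user interrupts and initialize
 * the IRQ wait queue.
 */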
void i915_driver_irq_postinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        spin_lock_init(&dev_priv->swaps_lock);
        INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending = 0;

        if (!dev_priv->vblank_pipe)
                dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
        i915_enable_interrupt(dev);
        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
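
/* Disable and acknowledge all interrupt sources when the IRQ handler is
 * removed.
 */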
void i915_driver_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 temp;

        if (!dev_priv)
                return;

        I915_WRITE16(I915REG_HWSTAM, 0xffff);
        I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
        I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

        temp = I915_READ16(I915REG_INT_IDENTITY_R);
        I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}